zip | filename | contents | type_annotations | type_annotation_starts | type_annotation_ends
---|---|---|---|---|---
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/lib/test_server.py |
import os
import subprocess
import sys
import time
from contextlib import contextmanager
from typing import (Any, Iterator, Optional)
# Verify the Zulip venv is available.
from tools.lib import sanity_check
sanity_check.check_venv(__file__)
import django
import requests
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if TOOLS_DIR not in sys.path:
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from zerver.lib.test_fixtures import run_generate_fixtures_if_required
def set_up_django(external_host):
# type: (str) -> None
os.environ['EXTERNAL_HOST'] = external_host
os.environ["TORNADO_SERVER"] = "http://127.0.0.1:9983"
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
django.setup()
os.environ['PYTHONUNBUFFERED'] = 'y'
def assert_server_running(server, log_file):
# type: (subprocess.Popen, Optional[str]) -> None
"""Get the exit code of the server, or None if it is still running."""
if server.poll() is not None:
message = 'Server died unexpectedly!'
if log_file:
message += '\nSee %s\n' % (log_file,)
raise RuntimeError(message)
def server_is_up(server, log_file):
# type: (subprocess.Popen, Optional[str]) -> bool
assert_server_running(server, log_file)
try:
# We could get a 501 error if the reverse proxy is up but the Django app isn't.
return requests.get('http://127.0.0.1:9981/accounts/home').status_code == 200
except Exception:
return False
@contextmanager
def test_server_running(force: bool=False, external_host: str='testserver',
log_file: Optional[str]=None, dots: bool=False, use_db: bool=True
) -> Iterator[None]:
log = sys.stdout
if log_file:
if os.path.exists(log_file) and os.path.getsize(log_file) < 100000:
log = open(log_file, 'a')
log.write('\n\n')
else:
log = open(log_file, 'w')
set_up_django(external_host)
if use_db:
run_generate_fixtures_if_required()
# Run this not through the shell, so that we have the actual PID.
run_dev_server_command = ['tools/run-dev.py', '--test']
if force:
run_dev_server_command.append('--force')
server = subprocess.Popen(run_dev_server_command,
stdout=log, stderr=log)
try:
# Wait for the server to start up.
sys.stdout.write('\nWaiting for test server (may take a while)')
if not dots:
sys.stdout.write('\n\n')
while not server_is_up(server, log_file):
if dots:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\n\n--- SERVER IS UP! ---\n\n')
# DO OUR ACTUAL TESTING HERE!!!
yield
finally:
assert_server_running(server, log_file)
server.terminate()
if __name__ == '__main__':
# The code below is for testing that this module works
with test_server_running():
print('\n\n SERVER IS UP!\n\n')
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/linter_lib/__init__.py | [] | [] | [] |
|
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/linter_lib/custom_check.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import traceback
from zulint.printer import print_err, colors
from typing import cast, Any, Callable, Dict, List, Optional, Tuple, Iterable
Rule = Dict[str, Any]
RuleList = List[Dict[str, Any]]
LineTup = Tuple[int, str, str, str]
FILES_WITH_LEGACY_SUBJECT = {
# This basically requires a big DB migration:
'zerver/lib/topic.py',
# This is for backward compatibility.
'zerver/tests/test_legacy_subject.py',
# Other migration-related changes require extreme care.
'zerver/lib/fix_unreads.py',
'zerver/tests/test_migrations.py',
# These use subject in the email sense, and will
# probably always be exempt:
'zerver/lib/email_mirror.py',
'zerver/lib/error_notify.py',
'zerver/lib/feedback.py',
'zerver/lib/send_email.py',
'zerver/tests/test_new_users.py',
# These are tied more to our API than our DB model.
'zerver/lib/api_test_helpers.py',
# TRY TO FIX THESE! If you can't fix them, try to
# add comments here and/or in the file itself about
# why sweeping subject is tricky.
'zerver/lib/stream_topic.py',
# This has lots of query data embedded, so it's hard
# to fix everything until we migrate the DB to "topic".
'zerver/tests/test_narrow.py',
}
def get_line_info_from_file(fn: str) -> List[LineTup]:
line_tups = []
for i, line in enumerate(open(fn)):
line_newline_stripped = line.strip('\n')
line_fully_stripped = line_newline_stripped.strip()
if line_fully_stripped.endswith(' # nolint'):
continue
tup = (i, line, line_newline_stripped, line_fully_stripped)
line_tups.append(tup)
return line_tups
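# Illustration (not from the source): for a file whose first line is
# "  x = 1  \n", the resulting LineTup is
#     (0, '  x = 1  \n', '  x = 1  ', 'x = 1')
# i.e. (index, raw line, newline-stripped, fully stripped); any line whose
# stripped form ends in ' # nolint' is dropped before the tuple is built.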
def get_rules_applying_to_fn(fn: str, rules: RuleList) -> RuleList:
rules_to_apply = []
for rule in rules:
excluded = False
for item in rule.get('exclude', set()):
if fn.startswith(item):
excluded = True
break
if excluded:
continue
if rule.get("include_only"):
found = False
for item in rule.get("include_only", set()):
if item in fn:
found = True
if not found:
continue
rules_to_apply.append(rule)
return rules_to_apply
def check_file_for_pattern(fn: str,
line_tups: List[LineTup],
identifier: str,
color: Optional[Iterable[str]],
rule: Rule) -> bool:
'''
DO NOT MODIFY THIS FUNCTION WITHOUT PROFILING.
This function gets called ~40k times, once per file per regex.
Inside it's doing a regex check for every line in the file, so
it's important to do things like pre-compiling regexes.
DO NOT INLINE THIS FUNCTION.
We need to see it show up in profiles, and the function call
overhead will never be a bottleneck.
'''
exclude_lines = {
line for
(exclude_fn, line) in rule.get('exclude_line', set())
if exclude_fn == fn
}
pattern = re.compile(rule['pattern'])
strip_rule = rule.get('strip') # type: Optional[str]
ok = True
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if line_fully_stripped in exclude_lines:
exclude_lines.remove(line_fully_stripped)
continue
try:
line_to_check = line_fully_stripped
if strip_rule is not None:
if strip_rule == '\n':
line_to_check = line_newline_stripped
else:
raise Exception("Invalid strip rule")
if pattern.search(line_to_check):
if rule.get("exclude_pattern"):
if re.search(rule['exclude_pattern'], line_to_check):
continue
print_err(identifier, color, '{} at {} line {}:'.format(
rule['description'], fn, i+1))
print_err(identifier, color, line)
ok = False
except Exception:
print("Exception with %s at %s line %s" % (rule['pattern'], fn, i+1))
traceback.print_exc()
if exclude_lines:
print('Please remove exclusions for file %s: %s' % (fn, exclude_lines))
return ok
def custom_check_file(fn: str,
identifier: str,
rules: RuleList,
color: Optional[Iterable[str]],
max_length: Optional[int]=None) -> bool:
failed = False
line_tups = get_line_info_from_file(fn=fn)
rules_to_apply = get_rules_applying_to_fn(fn=fn, rules=rules)
for rule in rules_to_apply:
ok = check_file_for_pattern(
fn=fn,
line_tups=line_tups,
identifier=identifier,
color=color,
rule=rule,
)
if not ok:
failed = True
# TODO: Move the below into more of a framework.
firstline = None
lastLine = None
if line_tups:
firstline = line_tups[0][3] # line_fully_stripped for the first line.
lastLine = line_tups[-1][1]
if max_length is not None:
ok = check_file_for_long_lines(
fn=fn,
max_length=max_length,
line_tups=line_tups,
)
if not ok:
failed = True
if firstline:
if os.path.splitext(fn)[1] and 'zerver/' in fn:
shebang_rules = [{'pattern': '^#!',
'description': "zerver library code shouldn't have a shebang line."}]
else:
shebang_rules = [{'pattern': '#!/usr/bin/python',
'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/python`"},
{'pattern': '#!/usr/bin/env python$',
'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`."}]
for rule in shebang_rules:
if re.search(rule['pattern'], firstline):
print_err(identifier, color,
'{} at {} line 1:'.format(rule['description'], fn))
print_err(identifier, color, firstline)
failed = True
if lastLine and ('\n' not in lastLine):
print("No newline at the end of file. Fix with `sed -i '$a\\' %s`" % (fn,))
failed = True
return failed
def check_file_for_long_lines(fn: str,
max_length: int,
line_tups: List[LineTup]) -> bool:
ok = True
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if isinstance(line, bytes):
line_length = len(line.decode("utf-8"))
else:
line_length = len(line)
if (line_length > max_length and
'# type' not in line and 'test' not in fn and 'example' not in fn and
# Don't throw errors for markdown format URLs
not re.search(r"^\[[ A-Za-z0-9_:,&()-]*\]: http.*", line) and
# Don't throw errors for URLs in code comments
not re.search(r"[#].*http.*", line) and
not re.search(r"`\{\{ api_url \}\}[^`]+`", line) and
"# ignorelongline" not in line and 'migrations' not in fn):
print("Line too long (%s) at %s line %s: %s" % (len(line), fn, i+1, line_newline_stripped))
ok = False
return ok
def build_custom_checkers(by_lang):
# type: (Dict[str, List[str]]) -> Tuple[Callable[[], bool], Callable[[], bool]]
# By default, a rule applies to all files within the extension for which it is specified (e.g. all .py files)
# There are three operators we can use to manually include or exclude files from linting for a rule:
# 'exclude': 'set([<path>, ...])' - if <path> is a filename, excludes that file.
# if <path> is a directory, excludes all files directly below the directory <path>.
# 'exclude_line': 'set([(<path>, <line>), ...])' - excludes all lines matching <line> in the file <path> from linting.
# 'include_only': 'set([<path>, ...])' - includes only those files where <path> is a substring of the filepath.
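# For illustration only -- a hypothetical rule (not part of the lists below)
# combining all three operators might look like:
#
#     {'pattern': r'print\(',
#      'include_only': set(['zerver/']),   # lint only paths containing 'zerver/'
#      'exclude': set(['zerver/tests']),   # ...but skip paths starting with this prefix
#      'exclude_line': set([('zerver/foo.py', 'print("ok")')]),  # skip this exact stripped line
#      'description': 'Use logging rather than print'}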
trailing_whitespace_rule = {
'pattern': r'\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'
}
whitespace_rules = [
# This linter should be first since bash_rules depends on it.
trailing_whitespace_rule,
{'pattern': 'http://zulip.readthedocs.io',
'description': 'Use HTTPS when linking to ReadTheDocs',
},
{'pattern': '\t',
'strip': '\n',
'exclude': set(['tools/travis/success-http-headers.txt']),
'description': 'Fix tab-based whitespace'},
] # type: RuleList
comma_whitespace_rule = [
{'pattern': ', {2,}[^#/ ]',
'exclude': set(['zerver/tests', 'frontend_tests/node_tests']),
'description': "Remove multiple whitespaces after ','",
'good_lines': ['foo(1, 2, 3)', 'foo = bar  # some inline comment'],
'bad_lines': ['foo(1,  2, 3)', 'foo(1, 2,  3)']},
] # type: RuleList
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
# Two spaces trailing a line with other content is okay--it's a markdown line break.
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^#+[A-Za-z0-9]',
'strip': '\n',
'description': 'Missing space after # in heading',
'good_lines': ['### some heading', '# another heading'],
'bad_lines': ['###some heading', '#another heading']},
] # type: RuleList
js_rules = cast(RuleList, [
{'pattern': r'[^_]function\(',
'description': 'The keyword "function" should be followed by a space'},
{'pattern': r'.*blueslip.warning\(.*',
'description': 'The module blueslip has no function warning, try using blueslip.warn'},
{'pattern': '[)]{$',
'description': 'Missing space between ) and {'},
{'pattern': r'i18n\.t\([^)]+[^,\{\)]$',
'description': 'i18n string should not be a multiline string'},
{'pattern': r'''i18n\.t\(['"].+?['"]\s*\+''',
'description': 'Do not concatenate arguments within i18n.t()'},
{'pattern': r'i18n\.t\(.+\).*\+',
'description': 'Do not concatenate i18n strings'},
{'pattern': r'\+.*i18n\.t\(.+\)',
'description': 'Do not concatenate i18n strings'},
{'pattern': '[.]includes[(]',
'exclude': ['frontend_tests/'],
'description': '.includes() is incompatible with Internet Explorer. Use .indexOf() !== -1 instead.'},
{'pattern': '[.]html[(]',
'exclude_pattern': '[.]html[(]("|\'|templates|html|message.content|sub.rendered_description|i18n.t|rendered_|$|[)]|error_text|widget_elem|[$]error|[$][(]"<p>"[)])',
'exclude': ['static/js/portico', 'static/js/lightbox.js', 'static/js/ui_report.js',
'static/js/confirm_dialog.js',
'frontend_tests/'],
'description': 'Setting HTML content with jQuery .html() can lead to XSS security bugs. Consider .text() or using rendered_foo as a variable name if content comes from handlebars and thus is already sanitized.'},
{'pattern': '["\']json/',
'description': 'Relative URL for JSON route not supported by i18n'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '^[ ]*//[A-Za-z0-9]',
'description': 'Missing space after // in comment'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': 'else{$',
'description': 'Missing space between else and {'},
{'pattern': '^else {$',
'description': 'Write JS else statements on same line as }'},
{'pattern': '^else if',
'description': 'Write JS else statements on same line as }'},
{'pattern': r'const\s',
'exclude': set(['frontend_tests/zjsunit',
'frontend_tests/node_tests',
'static/js/portico',
'tools/']),
'description': 'Avoid ES6 constructs until we upgrade our pipeline.'},
{'pattern': 'console[.][a-z]',
'exclude': set(['static/js/blueslip.js',
'frontend_tests/zjsunit',
'frontend_tests/casper_lib/common.js',
'frontend_tests/node_tests',
'static/js/debug.js',
'tools/setup/generate-custom-icon-webfont']),
'description': 'console.log and similar should not be used in webapp'},
{'pattern': r'''[.]text\(["'][a-zA-Z]''',
'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization',
'exclude': set(['frontend_tests/node_tests/'])},
{'pattern': r'''compose_error\(["']''',
'description': 'Argument to compose_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_success\(',
'description': 'Deprecated function, use ui_report.success.'},
{'pattern': r'''report.success\(["']''',
'description': 'Argument to report_success should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_error\(',
'description': 'Deprecated function, use ui_report.error.'},
{'pattern': r'''report.error\(["']''',
'description': 'Argument to report_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'\$\(document\)\.ready\(',
'description': "Use `$(f)` rather than `$(document).ready(f)`",
'good_lines': ['$(function () {foo();}'],
'bad_lines': ['$(document).ready(function () {foo();}']},
{'pattern': '[$][.](get|post|patch|delete|ajax)[(]',
'description': "Use channel module for AJAX calls",
'exclude': set([
# Internal modules can do direct network calls
'static/js/blueslip.js',
'static/js/channel.js',
# External modules that don't include channel.js
'static/js/stats/',
'static/js/portico/',
'static/js/billing/',
]),
'good_lines': ['channel.get(...)'],
'bad_lines': ['$.get()', '$.post()', '$.ajax()']},
{'pattern': 'style ?=',
'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
'exclude': set([
'frontend_tests/node_tests/copy_and_paste.js',
'frontend_tests/node_tests/upload.js',
'frontend_tests/node_tests/templates.js',
'static/js/upload.js',
'static/js/stream_color.js',
]),
'good_lines': ['#my-style {color: blue;}'],
'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
]) + whitespace_rules + comma_whitespace_rule
python_rules = cast(RuleList, [
{'pattern': 'subject|SUBJECT',
'exclude_pattern': 'subject to the|email',
'description': 'avoid subject as a var',
'good_lines': ['topic_name'],
'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN'],
'exclude': FILES_WITH_LEGACY_SUBJECT,
'include_only': set([
'zerver/data_import/',
'zerver/lib/',
'zerver/tests/',
'zerver/views/'])},
{'pattern': '^(?!#)@login_required',
'description': '@login_required is unsupported; use @zulip_login_required',
'good_lines': ['@zulip_login_required', '# foo @login_required'],
'bad_lines': ['@login_required', ' @login_required']},
{'pattern': '^user_profile[.]save[(][)]',
'description': 'Always pass update_fields when saving user_profile objects',
'exclude_line': set([
('zerver/lib/actions.py', "user_profile.save() # Can't use update_fields because of how the foreign key works."),
]),
'exclude': set(['zerver/tests', 'zerver/lib/create_user.py']),
'good_lines': ['user_profile.save(update_fields=["pointer"])'],
'bad_lines': ['user_profile.save()']},
{'pattern': r'^[^"]*"[^"]*"%\(',
'description': 'Missing space around "%"',
'good_lines': ['"%s" % ("foo")', '"%s" % (foo)'],
'bad_lines': ['"%s"%("foo")', '"%s"%(foo)']},
{'pattern': r"^[^']*'[^']*'%\(",
'description': 'Missing space around "%"',
'good_lines': ["'%s' % ('foo')", "'%s' % (foo)"],
'bad_lines': ["'%s'%('foo')", "'%s'%(foo)"]},
{'pattern': 'self: Any',
'description': 'you can omit Any annotation for self',
'good_lines': ['def foo (self):'],
'bad_lines': ['def foo(self: Any):']},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="',
'good_lines': ['a = b', '5 == 6'],
'bad_lines': ['a =b', 'asdf =42']},
{'pattern': r'":\w[^"]*$',
'description': 'Missing whitespace after ":"',
'good_lines': ['"foo": bar', '"some:string:with:colons"'],
'bad_lines': ['"foo":bar', '"foo":1']},
{'pattern': r"':\w[^']*$",
'description': 'Missing whitespace after ":"',
'good_lines': ["'foo': bar", "'some:string:with:colons'"],
'bad_lines': ["'foo':bar", "'foo':1"]},
{'pattern': r"^\s+#\w",
'strip': '\n',
'exclude': set(['tools/droplets/create.py']),
'description': 'Missing whitespace after "#"',
'good_lines': ['a = b # some operation', '1+2 # 3 is the result'],
'bad_lines': [' #some operation', ' #not valid!!!']},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).',
'good_lines': ['assertEqual(1, 2)'],
'bad_lines': ['assertEquals(1, 2)']},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None',
'good_lines': ['if foo is None'],
'bad_lines': ['foo == None']},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation',
'good_lines': ['# type: (Any, Any)', 'colon:separated:string:containing:type:as:keyword'],
'bad_lines': ['# type:(Any, Any)']},
{'pattern': "type: ignore$",
'exclude': set(['tools/tests',
'zerver/lib/test_runner.py',
'zerver/tests']),
'description': '"type: ignore" should always end with "# type: ignore # explanation for why"',
'good_lines': ['foo = bar # type: ignore # explanation'],
'bad_lines': ['foo = bar # type: ignore']},
{'pattern': "# type [(]",
'description': 'Missing : after type in type annotation',
'good_lines': ['foo = 42 # type: int', '# type: (str, int) -> None'],
'bad_lines': ['# type (str, int) -> None']},
{'pattern': "#type",
'description': 'Missing whitespace after "#" in type annotation',
'good_lines': ['foo = 42 # type: int'],
'bad_lines': ['foo = 42 #type: int']},
{'pattern': r'\b(if|else|while)[(]',
'description': 'Put a space between statements like if, else, etc. and (.',
'good_lines': ['if (1 == 2):', 'while (foo == bar):'],
'bad_lines': ['if(1 == 2):', 'while(foo == bar):']},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"',
'good_lines': ['foo = (1, 2, 3,)', 'foo(bar, 42)'],
'bad_lines': ['foo = (1, 2, 3, )']},
{'pattern': "%  [(]",
'description': 'Unnecessary whitespace between "%" and "("',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" %  ("baz",)']},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': r''' % [a-zA-Z0-9_."']*\)?$''',
'exclude_line': set([
('tools/tests/test_template_parser.py', '{% foo'),
]),
'description': 'Used % comprehension without a tuple',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % "baz"']},
{'pattern': r'''.*%s.* % \([a-zA-Z0-9_."']*\)$''',
'description': 'Used % comprehension without a tuple',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % ("baz")']},
{'pattern': 'sudo',
'include_only': set(['scripts/']),
'exclude': set(['scripts/lib/setup_venv.py']),
'exclude_line': set([
('scripts/lib/zulip_tools.py', '# We need sudo here, since the path will be under /srv/ in the'),
('scripts/lib/zulip_tools.py', 'subprocess.check_call(["sudo", "/bin/bash", "-c",'),
('scripts/lib/zulip_tools.py', 'subprocess.check_call(["sudo", "rm", "-rf", directory])'),
]),
'description': 'Most scripts are intended to run on systems without sudo.',
'good_lines': ['subprocess.check_call(["ls"])'],
'bad_lines': ['subprocess.check_call(["sudo", "ls"])']},
{'pattern': 'django.utils.translation',
'include_only': set(['test/']),
'description': 'Test strings should not be tagged for translation',
'good_lines': [''],
'bad_lines': ['django.utils.translation']},
{'pattern': 'userid',
'description': 'We prefer user_id over userid.',
'good_lines': ['id = alice.user_id'],
'bad_lines': ['id = alice.userid']},
{'pattern': r'json_success\({}\)',
'description': 'Use json_success() to return nothing',
'good_lines': ['return json_success()'],
'bad_lines': ['return json_success({})']},
{'pattern': r'\Wjson_error\(_\(?\w+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to json_error should be a literal string enclosed by _()',
'good_lines': ['return json_error(_("string"))'],
'bad_lines': ['return json_error(_variable)', 'return json_error(_(variable))']},
{'pattern': r'''\Wjson_error\(['"].+[),]$''',
'exclude': set(['zerver/tests']),
'exclude_line': set([
# We don't want this string tagged for translation.
('zerver/views/compatibility.py', 'return json_error("Client is too old")'),
]),
'description': 'Argument to json_error should be a literal string enclosed by _()'},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{'pattern': r'\WJsonableError\(_\(?\w.+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r'''\WJsonableError\(["'].+\)''',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r'''([a-zA-Z0-9_]+)=REQ\(['"]\1['"]''',
'description': 'REQ\'s first argument already defaults to parameter name'},
{'pattern': r'self\.client\.(get|post|patch|put|delete)',
'description': \
'''Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
'''},
# Directly fetching Message objects in e.g. views code is often a security bug.
{'pattern': '[^r]Message.objects.get',
'exclude': set(["zerver/tests",
"zerver/lib/onboarding.py",
"zilencer/management/commands/add_mock_conversation.py",
"zerver/worker/queue_processors.py"]),
'description': 'Please use access_message() to fetch Message objects',
},
{'pattern': 'Stream.objects.get',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'get_stream[(]',
'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
'exclude_line': set([
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
('zerver/lib/actions.py', 'get_stream(admin_realm_signup_notifications_stream, admin_realm)'),
# Here we need get_stream to access streams you've since unsubscribed from.
('zerver/views/messages.py', 'stream = get_stream(operand, self.user_profile.realm)'),
# Use stream_id to exclude mutes.
('zerver/views/messages.py', 'stream_id = get_stream(stream_name, user_profile.realm).id'),
]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'Stream.objects.filter',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '^from (zerver|analytics|confirmation)',
'include_only': set(["/migrations/"]),
'exclude': set([
'zerver/migrations/0032_verify_all_medium_avatar_images.py',
'zerver/migrations/0060_move_avatars_to_be_uid_based.py',
'zerver/migrations/0104_fix_unreads.py',
'pgroonga/migrations/0002_html_escape_subject.py',
]),
'description': "Don't import models or other code in migrations; see docs/subsystems/schema-migrations.md",
},
{'pattern': 'datetime[.](now|utcnow)',
'include_only': set(["zerver/", "analytics/"]),
'description': "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/contributing/code-style.html#naive-datetime-objects",
},
{'pattern': r'render_to_response\(',
'description': "Use render() instead of render_to_response().",
},
{'pattern': 'from os.path',
'description': "Don't use from when importing from the standard library",
},
{'pattern': 'import os.path',
'description': "Use import os instead of import os.path",
},
{'pattern': r'(logging|logger)\.warn\W',
'description': "Logger.warn is a deprecated alias for Logger.warning; Use 'warning' instead of 'warn'.",
'good_lines': ["logging.warning('I am a warning.')", "logger.warning('warning')"],
'bad_lines': ["logging.warn('I am a warning.')", "logger.warn('warning')"]},
{'pattern': r'\.pk',
'exclude_pattern': '[.]_meta[.]pk',
'description': "Use `id` instead of `pk`.",
'good_lines': ['if my_django_model.id == 42', 'self.user_profile._meta.pk'],
'bad_lines': ['if my_django_model.pk == 42']},
{'pattern': r'^[ ]*# type: \(',
'exclude': set([
# These directories, especially scripts/ and puppet/,
# have tools that need to run before a Zulip environment
# is provisioned; in some of those, the `typing` module
# might not be available yet, so care is required.
'scripts/',
'tools/',
'puppet/',
# Zerver files that we should just clean.
'zerver/tests',
'zerver/lib/api_test_helpers.py',
'zerver/lib/request.py',
'zerver/views/streams.py',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': 'Comment-style function type annotation. Use Python3 style annotations instead.',
},
{'pattern': r' = models[.].*null=True.*\) # type: (?!Optional)',
'include_only': {"zerver/models.py"},
'description': 'Model variable with null=true not annotated as Optional.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.CharField(null=True) # type: Text',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Stream'],
},
{'pattern': r' = models[.](?!NullBoolean).*\) # type: Optional', # Optional tag, except NullBoolean(Field)
'exclude_pattern': 'null=True',
'include_only': {"zerver/models.py"},
'description': 'Model variable annotated with Optional but variable does not have null=true.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.TextField() # type: Optional[Text]',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Optional[Stream]'],
},
{'pattern': r'[\s([]Text([^\s\w]|$)',
'exclude': set([
# We are likely to want to keep these dirs Python 2+3 compatible,
# since the plan includes extracting them to a separate project eventually.
'tools/lib',
'tools/zulint',
# TODO: Update our migrations from Text->str.
'zerver/migrations/',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': "Now that we're a Python 3 only codebase, we don't need to use typing.Text. Please use str instead.",
},
]) + whitespace_rules + comma_whitespace_rule
bash_rules = cast(RuleList, [
{'pattern': '#!.*sh [-xe]',
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
' to set -x|set -e'},
{'pattern': 'sudo',
'description': 'Most scripts are intended to work on systems without sudo',
'include_only': set(['scripts/']),
'exclude': set([
'scripts/lib/install',
'scripts/lib/create-zulip-admin',
'scripts/setup/terminate-psql-sessions',
'scripts/setup/configure-rabbitmq'
]), },
]) + whitespace_rules[0:1]
css_rules = cast(RuleList, [
{'pattern': r'calc\([^+]+\+[^+]+\)',
'description': "Avoid using calc with '+' operator. See #8403 : in CSS.",
'good_lines': ["width: calc(20% - -14px);"],
'bad_lines': ["width: calc(20% + 14px);"]},
{'pattern': r'^[^:]*:\S[^:]*;$',
'description': "Missing whitespace after : in CSS",
'good_lines': ["background-color: white;", "text-size: 16px;"],
'bad_lines': ["background-color:white;", "text-size:16px;"]},
{'pattern': '[a-z]{',
'description': "Missing whitespace before '{' in CSS.",
'good_lines': ["input {", "body {"],
'bad_lines': ["input{", "body{"]},
{'pattern': 'https://',
'description': "Zulip CSS should have no dependencies on external resources",
'good_lines': ['background: url(/static/images/landing-page/pycon.jpg);'],
'bad_lines': ['background: url(https://example.com/image.png);']},
{'pattern': '^[ ][ ][a-zA-Z0-9]',
'description': "Incorrect 2-space indentation in CSS",
'strip': '\n',
'good_lines': [" color: white;", "color: white;"],
'bad_lines': [" color: white;"]},
{'pattern': r'{\w',
'description': "Missing whitespace after '{' in CSS (should be newline).",
'good_lines': ["{\n"],
'bad_lines': ["{color: LightGoldenRodYellow;"]},
{'pattern': ' thin[ ;]',
'description': "thin CSS attribute is under-specified, please use 1px.",
'good_lines': ["border-width: 1px;"],
'bad_lines': ["border-width: thin;", "border-width: thin solid black;"]},
{'pattern': ' medium[ ;]',
'description': "medium CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 3px;"],
'bad_lines': ["border-width: medium;", "border: medium solid black;"]},
{'pattern': ' thick[ ;]',
'description': "thick CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 5px;"],
'bad_lines': ["border-width: thick;", "border: thick solid black;"]},
]) + whitespace_rules + comma_whitespace_rule
prose_style_rules = cast(RuleList, [
{'pattern': r'[^\/\#\-"]([jJ]avascript)', # exclude usage in hrefs/divs
'description': "javascript should be spelled JavaScript"},
{'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''', # exclude usage in hrefs/divs
'description': "github should be spelled GitHub"},
{'pattern': '[oO]rganisation', # exclude usage in hrefs/divs
'description': "Organization is spelled with a z",
'exclude_line': [('docs/translating/french.md', '* organization - **organisation**')]},
{'pattern': '!!! warning',
'description': "!!! warning is invalid; it's spelled '!!! warn'"},
{'pattern': 'Terms of service',
'description': "The S in Terms of Service is capitalized"},
{'pattern': '[^-_]botserver(?!rc)|bot server',
'description': "Use Botserver instead of botserver or bot server."},
]) + comma_whitespace_rule
html_rules = whitespace_rules + prose_style_rules + [
{'pattern': r'placeholder="[^{#](?:(?!\.com).)+$',
'description': "`placeholder` value should be translatable.",
'exclude_line': [('templates/zerver/register.html', 'placeholder="acme"'),
('templates/zerver/register.html', 'placeholder="Acme or Aκμή"')],
'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
'bad_lines': ['<input placeholder="foo">']},
{'pattern': "placeholder='[^{]",
'description': "`placeholder` value should be translatable.",
'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
'bad_lines': ["<input placeholder='foo'>"]},
{'pattern': "aria-label='[^{]",
'description': "`aria-label` value should be translatable.",
'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
'bad_lines': ["<button aria-label='foo'></button>"]},
{'pattern': 'aria-label="[^{]',
'description': "`aria-label` value should be translatable.",
'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
'bad_lines': ['<button aria-label="foo"></button>']},
{'pattern': 'script src="http',
'description': "Don't directly load dependencies from CDNs. See docs/subsystems/front-end-build-process.md",
'exclude': set(["templates/corporate/billing.html", "templates/zerver/hello.html",
"templates/corporate/upgrade.html"]),
'good_lines': ["{{ render_bundle('landing-page') }}"],
'bad_lines': ['<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>']},
{'pattern': "title='[^{]",
'description': "`title` value should be translatable.",
'good_lines': ['<link rel="author" title="{{ _(\'About these documents\') }}" />'],
'bad_lines': ["<p title='foo'></p>"]},
{'pattern': r'title="[^{\:]',
'exclude_line': set([
('templates/zerver/app/markdown_help.html',
'<td><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
]),
'exclude': set(["templates/zerver/emails"]),
'description': "`title` value should be translatable."},
{'pattern': r'''\Walt=["'][^{"']''',
'description': "alt argument should be enclosed by _() or it should be an empty string.",
'exclude': set(['static/templates/settings/display-settings.handlebars',
'templates/zerver/app/keyboard_shortcuts.html',
'templates/zerver/app/markdown_help.html']),
'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />', '<img alt="" />'],
'bad_lines': ['<img alt="Foo Image" />']},
{'pattern': r'''\Walt=["']{{ ?["']''',
'description': "alt argument should be enclosed by _().",
'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />'],
'bad_lines': ['<img alt="{{ " />']},
{'pattern': r'\bon\w+ ?=',
'description': "Don't use inline event handlers (onclick=, etc. attributes) in HTML. Instead,"
"attach a jQuery event handler ($('#foo').on('click', function () {...})) when "
"the DOM is ready (inside a $(function () {...}) block).",
'exclude': set(['templates/zerver/dev_login.html']),
'good_lines': ["($('#foo').on('click', function () {}"],
'bad_lines': ["<button id='foo' onclick='myFunction()'>Foo</button>", "<input onchange='myFunction()'>"]},
{'pattern': 'style ?=',
'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
'exclude_pattern': r'.*style ?=["' + "'" + '](display: ?none|background: {{|color: {{|background-color: {{).*',
'exclude': set([
# KaTeX output uses style attribute
'templates/zerver/app/markdown_help.html',
# 5xx page doesn't have external CSS
'static/html/5xx.html',
# Group PMs color is dynamically calculated
'static/templates/group_pms.handlebars',
# exclude_pattern above handles color, but have other issues:
'static/templates/draft.handlebars',
'static/templates/subscription.handlebars',
'static/templates/single_message.handlebars',
# Old-style email templates need to use inline style
# attributes; it should be possible to clean these up
# when we convert these templates to use premailer.
'templates/zerver/emails/digest.html',
'templates/zerver/emails/missed_message.html',
'templates/zerver/emails/email_base_messages.html',
# Email log templates; should clean up.
'templates/zerver/email.html',
'templates/zerver/email_log.html',
# Probably just needs to be changed to display: none so the exclude works
'templates/zerver/app/navbar.html',
# Needs the width cleaned up; display: none is fine
'static/templates/settings/account-settings.handlebars',
# background image property is dynamically generated
'static/templates/user_profile_modal.handlebars',
# Inline styling for an svg; could be moved to CSS files?
'templates/zerver/landing_nav.html',
'templates/zerver/billing_nav.html',
'templates/zerver/app/home.html',
'templates/zerver/features.html',
'templates/zerver/portico-header.html',
'templates/corporate/billing.html',
# Miscellaneous violations to be cleaned up
'static/templates/user_info_popover_title.handlebars',
'static/templates/subscription_invites_warning_modal.handlebars',
'templates/zerver/reset_confirm.html',
'templates/zerver/config_error.html',
'templates/zerver/dev_env_email_access_details.html',
'templates/zerver/confirm_continue_registration.html',
'templates/zerver/register.html',
'templates/zerver/accounts_send_confirm.html',
'templates/zerver/integrations/index.html',
'templates/zerver/documentation_main.html',
'templates/analytics/realm_summary_table.html',
'templates/corporate/zephyr.html',
'templates/corporate/zephyr-mirror.html',
]),
'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
] # type: RuleList
handlebars_rules = html_rules + [
{'pattern': "[<]script",
'description': "Do not use inline <script> tags here; put JavaScript in static/js instead."},
{'pattern': '{{ t ("|\')',
'description': 'There should be no spaces before the "t" in a translation tag.'},
{'pattern': r"{{t '.*' }}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': r'{{t ".*" }}[\.\?!]',
'description': "Period should be part of the translatable string."},
{'pattern': r"{{/tr}}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': '{{t ("|\') ',
'description': 'Translatable strings should not have leading spaces.'},
{'pattern': "{{t '[^']+ ' }}",
'description': 'Translatable strings should not have trailing spaces.'},
{'pattern': '{{t "[^"]+ " }}',
'description': 'Translatable strings should not have trailing spaces.'},
]
jinja2_rules = html_rules + [
{'pattern': r"{% endtrans %}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': r"{{ _(.+) }}[\.\?!]",
'description': "Period should be part of the translatable string."},
]
json_rules = [
# Here, we don't use `whitespace_rules`, because the tab-based
# whitespace rule flags a lot of third-party JSON fixtures
# under zerver/webhooks that we want preserved verbatim. So
# we just include the trailing whitespace rule and a modified
# version of the tab-based whitespace rule (we can't just use
# exclude in whitespace_rules, since we only want to ignore
# JSON files with tab-based whitespace, not webhook code).
trailing_whitespace_rule,
{'pattern': '\t',
'strip': '\n',
'exclude': set(['zerver/webhooks/']),
'description': 'Fix tab-based whitespace'},
{'pattern': r'":["\[\{]',
'exclude': set(['zerver/webhooks/', 'zerver/tests/fixtures/']),
'description': 'Require space after : in JSON'},
] # type: RuleList
markdown_rules = markdown_whitespace_rules + prose_style_rules + [
{'pattern': r'\[(?P<url>[^\]]+)\]\((?P=url)\)',
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'},
{'pattern': 'https://zulip.readthedocs.io/en/latest/[a-zA-Z0-9]',
'exclude': ['docs/overview/contributing.md', 'docs/overview/readme.md', 'docs/README.md'],
'include_only': set(['docs/']),
'description': "Use relative links (../foo/bar.html) to other documents in docs/",
},
{'pattern': r'\][(][^#h]',
'include_only': set(['README.md', 'CONTRIBUTING.md']),
'description': "Use absolute links from docs served by GitHub",
},
]
help_markdown_rules = markdown_rules + [
{'pattern': '[a-z][.][A-Z]',
'description': "Likely missing space after end of sentence"},
{'pattern': r'\b[rR]ealm[s]?\b',
'good_lines': ['Organization', 'deactivate_realm', 'realm_filter'],
'bad_lines': ['Users are in a realm', 'Realm is the best model'],
'description': "Realms are referred to as Organizations in user-facing docs."},
]
txt_rules = whitespace_rules
def check_custom_checks_py():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['py']:
if 'custom_check.py' in fn:
continue
if custom_check_file(fn, 'py', python_rules, color, max_length=110):
failed = True
return failed
def check_custom_checks_nonpy():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['js']:
if custom_check_file(fn, 'js', js_rules, color):
failed = True
color = next(colors)
for fn in by_lang['sh']:
if custom_check_file(fn, 'sh', bash_rules, color):
failed = True
color = next(colors)
for fn in by_lang['css']:
if custom_check_file(fn, 'css', css_rules, color):
failed = True
color = next(colors)
for fn in by_lang['handlebars']:
if custom_check_file(fn, 'handlebars', handlebars_rules, color):
failed = True
color = next(colors)
for fn in by_lang['html']:
if custom_check_file(fn, 'html', jinja2_rules, color):
failed = True
color = next(colors)
for fn in by_lang['json']:
if custom_check_file(fn, 'json', json_rules, color):
failed = True
color = next(colors)
markdown_docs_length_exclude = {
# Has some example Vagrant output that's very long
"docs/development/setup-vagrant.md",
# Have wide output in code blocks
"docs/subsystems/logging.md",
"docs/subsystems/migration-renumbering.md",
# Have curl commands with JSON that would be messy to wrap
"zerver/webhooks/helloworld/doc.md",
"zerver/webhooks/trello/doc.md",
# Has a very long configuration line
"templates/zerver/integrations/perforce.md",
# Has some example code that could perhaps be wrapped
"templates/zerver/api/incoming-webhooks-walkthrough.md",
# This macro has a long indented URL
"templates/zerver/help/include/git-webhook-url-with-branches-indented.md",
# These two are the same file and have some too-long lines for GitHub badges
"README.md",
"docs/overview/readme.md",
}
for fn in by_lang['md']:
max_length = None
if fn not in markdown_docs_length_exclude:
max_length = 120
rules = markdown_rules
if fn.startswith("templates/zerver/help"):
rules = help_markdown_rules
if custom_check_file(fn, 'md', rules, color, max_length=max_length):
failed = True
color = next(colors)
for fn in by_lang['txt'] + by_lang['text']:
if custom_check_file(fn, 'txt', txt_rules, color):
failed = True
color = next(colors)
for fn in by_lang['rst']:
if custom_check_file(fn, 'rst', txt_rules, color):
failed = True
color = next(colors)
for fn in by_lang['yaml']:
if custom_check_file(fn, 'yaml', txt_rules, color):
failed = True
return failed
return (check_custom_checks_py, check_custom_checks_nonpy)
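# Illustrative usage (hypothetical file map; not in the original source). The
# checkers index by_lang for 'py', 'js', 'sh', 'css', 'handlebars', 'html',
# 'json', 'md', 'txt', 'text', 'rst', and 'yaml', so every key must exist:
#
#     by_lang = {lang: [] for lang in ('py', 'js', 'sh', 'css', 'handlebars',
#                                      'html', 'json', 'md', 'txt', 'text',
#                                      'rst', 'yaml')}
#     by_lang['py'] = ['zerver/views/streams.py']
#     check_py, check_nonpy = build_custom_checkers(by_lang)
#     failed = check_py() or check_nonpy()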
| [
"str",
"str",
"RuleList",
"str",
"List[LineTup]",
"str",
"Optional[Iterable[str]]",
"Rule",
"str",
"str",
"RuleList",
"Optional[Iterable[str]]",
"str",
"int",
"List[LineTup]"
] | [
1404,
1824,
1836,
2441,
2484,
2538,
2577,
2635,
4473,
4512,
4546,
4585,
6602,
6649,
6695
] | [
1407,
1827,
1844,
2444,
2497,
2541,
2600,
2639,
4476,
4515,
4554,
4608,
6605,
6652,
6708
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/linter_lib/exclude.py | # Exclude some directories and files from lint checking
EXCLUDED_FILES = [
# Third-party code that doesn't match our style
"puppet/apt/.forge-release",
"puppet/apt/README.md",
"puppet/apt/manifests/backports.pp",
"puppet/apt/manifests/params.pp",
"puppet/apt/manifests/release.pp",
"puppet/apt/manifests/unattended_upgrades.pp",
"puppet/stdlib/tests/file_line.pp",
"puppet/zulip/files/nagios_plugins/zulip_nagios_server/check_website_response.sh",
"scripts/lib/third",
"static/third",
# Transifex syncs translation.json files without trailing
# newlines; there's nothing other than trailing newlines we'd be
# checking for in these anyway.
"static/locale",
]
PUPPET_CHECK_RULES_TO_EXCLUDE = [
"--no-documentation-check",
"--no-80chars-check",
]
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/linter_lib/pep8.py | from __future__ import print_function
from __future__ import absolute_import
import subprocess
from zulint.linters import run_pycodestyle
from typing import List
def check_pep8(files):
# type: (List[str]) -> bool
ignored_rules = [
# Each of these rules is ignored for the reason explained alongside it.
# "multiple spaces before operator"
# There are several typos here, but also several instances that are
# being used for alignment in dict keys/values using the `dict`
# constructor. We could fix the alignment cases by switching to the `{}`
# constructor, but it makes fixing this rule a little less
# straightforward.
'E221',
# 'missing whitespace around arithmetic operator'
# This should possibly be cleaned up, though changing some of
# these may make the code less readable.
'E226',
# New rules in pycodestyle 2.4.0 that we haven't decided whether to comply with yet
'E252', 'W504',
# "multiple spaces after ':'"
# This is the `{}` analogue of E221, and these are similarly being used
# for alignment.
'E241',
# "unexpected spaces around keyword / parameter equals"
# Many of these should be fixed, but many are also being used for
# alignment/making the code easier to read.
'E251',
# "block comment should start with '#'"
# These serve to show which lines should be changed in files customized
# by the user. We could probably resolve one of E265 or E266 by
# standardizing on a single style for lines that the user might want to
# change.
'E265',
# "too many leading '#' for block comment"
# Most of these are there for valid reasons.
'E266',
# "expected 2 blank lines after class or function definition"
# Zulip only uses 1 blank line after class/function
# definitions; the PEP-8 recommendation results in super sparse code.
'E302', 'E305',
# "module level import not at top of file"
# Most of these are there for valid reasons, though there might be a
# few that could be eliminated.
'E402',
# "line too long"
# Zulip is a bit less strict about line length, and has its
# own check for this (see max_length)
'E501',
# "do not assign a lambda expression, use a def"
# Fixing these would probably reduce readability in most cases.
'E731',
# "line break before binary operator"
# This is a bug in the `pep8`/`pycodestyle` tool -- it's completely backward.
# See https://github.com/PyCQA/pycodestyle/issues/498 .
'W503',
# This number will probably be used for the corrected, inverse version of
# W503 when that's added: https://github.com/PyCQA/pycodestyle/pull/502
# Once that fix lands and we update to a version of pycodestyle that has it,
# we'll want the rule; but we might have to briefly ignore it while we fix
# existing code.
# 'W504',
]
return run_pycodestyle(files, ignored_rules)
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/linter_lib/pyflakes.py | from __future__ import print_function
from __future__ import absolute_import
import argparse
import subprocess
from zulint.printer import print_err, colors
from typing import Any, Dict, List
suppress_patterns = [
(b'', b'imported but unused'),
(b'', b'redefinition of unused'),
# Our ipython startup pythonrc file intentionally imports *
(b"scripts/lib/pythonrc.py",
b" import *' used; unable to detect undefined names"),
# Special dev_settings.py import
(b'', b"from .prod_settings_template import *"),
(b"settings.py", b"settings import *' used; unable to detect undefined names"),
(b"settings.py", b"may be undefined, or defined from star imports"),
# Sphinx adds `tags` specially to the environment when running conf.py.
(b"docs/conf.py", b"undefined name 'tags'"),
]
def suppress_line(line: str) -> bool:
for file_pattern, line_pattern in suppress_patterns:
if file_pattern in line and line_pattern in line:
return True
return False
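# Illustrative example (not part of the module): the lines fed to this
# function below are bytes read from the pyflakes subprocess pipes, so both
# halves of a suppress_patterns entry must occur in the same bytes line:
#
#     suppress_line(b"scripts/lib/pythonrc.py:3: 'from foo import *' used; "
#                   b"unable to detect undefined names")  # -> True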
def check_pyflakes(files, options):
# type: (List[str], argparse.Namespace) -> bool
if len(files) == 0:
return False
failed = False
color = next(colors)
pyflakes = subprocess.Popen(['pyflakes'] + files,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
assert pyflakes.stdout is not None # Implied by use of subprocess.PIPE
for ln in pyflakes.stdout.readlines() + pyflakes.stderr.readlines():
if options.full or not suppress_line(ln):
print_err('pyflakes', color, ln)
failed = True
return failed
| [
"str"
] | [
850
] | [
853
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/run-dev.py | #!/usr/bin/env python3
import argparse
import os
import pwd
import signal
import subprocess
import sys
import time
import traceback
from urllib.parse import urlunparse
# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)
from tornado import httpclient
from tornado import httputil
from tornado import gen
from tornado import web
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler, websocket_connect
from typing import Any, Callable, Generator, List, Optional
if 'posix' in os.name and os.geteuid() == 0:
raise RuntimeError("run-dev.py should not be run as root.")
parser = argparse.ArgumentParser(description=r"""
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which serves to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
""",
formatter_class=argparse.RawTextHelpFormatter)
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from tools.lib.test_script import (
get_provisioning_status,
)
parser.add_argument('--test',
action='store_true',
help='Use the testing database and ports')
parser.add_argument('--minify',
action='store_true',
help='Minifies assets for testing in dev')
parser.add_argument('--interface',
action='store',
default=None, help='Set the IP or hostname for the proxy to listen on')
parser.add_argument('--no-clear-memcached',
action='store_false', dest='clear_memcached',
default=True, help='Do not clear memcached')
parser.add_argument('--force',
action="store_true",
default=False, help='Run command despite possible problems.')
parser.add_argument('--enable-tornado-logging',
action="store_true",
default=False, help='Enable access logs from tornado proxy server.')
options = parser.parse_args()
if not options.force:
ok, msg = get_provisioning_status()
if not ok:
print(msg)
print('If you really know what you are doing, use --force to run anyway.')
sys.exit(1)
if options.interface is None:
user_id = os.getuid()
user_name = pwd.getpwuid(user_id).pw_name
if user_name in ["vagrant", "zulipdev"]:
# In the Vagrant development environment, we need to listen on
# all ports, and it's safe to do so, because Vagrant is only
# exposing certain guest ports (by default just 9991) to the
# host. The same argument applies to the remote development
# servers using username "zulipdev".
options.interface = None
else:
# Otherwise, only listen to requests on localhost for security.
options.interface = "127.0.0.1"
elif options.interface == "":
options.interface = None
runserver_args = [] # type: List[str]
base_port = 9991
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
# Don't auto-reload when running casper tests
runserver_args = ['--noreload']
else:
settings_module = "zproject.settings"
manage_args = ['--settings=%s' % (settings_module,)]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from scripts.lib.zulip_tools import WARNING, ENDC
proxy_port = base_port
django_port = base_port + 1
tornado_port = base_port + 2
webpack_port = base_port + 3
thumbor_port = base_port + 4
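# For example, with --test the base_port of 9981 yields: proxy 9981,
# Django 9982, Tornado 9983 (matching the TORNADO_SERVER URL hard-coded in
# tools/lib/test_server.py), webpack 9984, and thumbor 9985.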
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')
if options.clear_memcached:
print("Clearing memcached ...")
subprocess.check_call('./scripts/setup/flush-memcached')
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Save pid of parent process to the pid file. It can be used later by
# tools/stop-run-dev to kill the server without having to find the
# terminal in question.
if options.test:
pid_file_path = os.path.join(os.path.join(os.getcwd(), 'var/casper/run_dev.pid'))
else:
pid_file_path = os.path.join(os.path.join(os.getcwd(), 'var/run/run_dev.pid'))
# Required for compatibility with older Python versions.
if not os.path.exists(os.path.dirname(pid_file_path)):
os.makedirs(os.path.dirname(pid_file_path))
pid_file = open(pid_file_path, 'w+')
pid_file.write(str(os.getpgrp()) + "\n")
pid_file.close()
# Pass --nostatic because we configure static serving ourselves in
# zulip/urls.py.
cmds = [['./manage.py', 'runserver'] +
manage_args + runserver_args + ['127.0.0.1:%d' % (django_port,)],
['env', 'PYTHONUNBUFFERED=1', './manage.py', 'runtornado'] +
manage_args + ['127.0.0.1:%d' % (tornado_port,)],
['./tools/run-dev-queue-processors'] + manage_args,
['env', 'PGHOST=127.0.0.1', # Force password authentication using .pgpass
'./puppet/zulip/files/postgresql/process_fts_updates'],
['./manage.py', 'deliver_scheduled_messages'],
['/srv/zulip-thumbor-venv/bin/thumbor', '-c', './zthumbor/thumbor.conf',
'-p', '%s' % (thumbor_port,)]]
if options.test:
# We just need to compile handlebars templates and webpack assets
# once at startup, not run a daemon, in test mode. Additionally,
# webpack-dev-server doesn't support running 2 copies on the same
# system, so this model lets us run the casper tests with a running
# development server.
subprocess.check_call(['./tools/compile-handlebars-templates'])
subprocess.check_call(['./tools/webpack', '--quiet', '--test'])
else:
cmds.append(['./tools/compile-handlebars-templates', 'forever'])
webpack_cmd = ['./tools/webpack', '--watch', '--port', str(webpack_port)]
if options.minify:
webpack_cmd.append('--minify')
if options.interface is None:
# If interface is None and we're listening on all ports, we also need
# to disable the webpack host check so that webpack will serve assets.
webpack_cmd.append('--disable-host-check')
if options.interface:
webpack_cmd += ["--host", options.interface]
else:
webpack_cmd += ["--host", "0.0.0.0"]
cmds.append(webpack_cmd)
for cmd in cmds:
subprocess.Popen(cmd)
def transform_url(protocol, path, query, target_port, target_host):
# type: (str, str, str, int, str) -> str
# generate url with target host
host = ":".join((target_host, str(target_port)))
# Here we are going to rewrite the path a bit so that it is in parity with
# what we will have for production
if path.startswith('/thumbor'):
path = path[len('/thumbor'):]
newpath = urlunparse((protocol, host, path, '', query, ''))
return newpath
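# Worked example (illustrative values): forwarding a thumbor request strips
# the /thumbor prefix before rebuilding the URL:
#
#     transform_url('http', '/thumbor/abc.png', 'size=40', 9985, '127.0.0.1')
#     # -> 'http://127.0.0.1:9985/abc.png?size=40'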
@gen.engine
def fetch_request(url, callback, **kwargs):
# type: (str, Any, **Any) -> Generator[Callable[..., Any], Any, None]
# use large timeouts to handle polling requests
req = httpclient.HTTPRequest(url, connect_timeout=240.0, request_timeout=240.0, **kwargs)
client = httpclient.AsyncHTTPClient()
# wait for response
response = yield gen.Task(client.fetch, req)
callback(response)
class BaseWebsocketHandler(WebSocketHandler):
# target server ip
target_host = '127.0.0.1' # type: str
# target server port
target_port = None # type: int
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super().__init__(*args, **kwargs)
# define client for target websocket server
self.client = None # type: Any
def get(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[Callable[..., Any]]
# use get method from WebsocketHandler
return super().get(*args, **kwargs)
def open(self):
# type: () -> None
# setup connection with target websocket server
websocket_url = "ws://{host}:{port}{uri}".format(
host=self.target_host,
port=self.target_port,
uri=self.request.uri
)
request = httpclient.HTTPRequest(websocket_url)
request.headers = self._add_request_headers(['sec-websocket-extensions'])
websocket_connect(request, callback=self.open_callback,
on_message_callback=self.on_client_message)
def open_callback(self, future):
# type: (Any) -> None
# callback on connect with target websocket server
self.client = future.result()
def on_client_message(self, message):
# type: (str) -> None
if not message:
# if message empty -> target websocket server close connection
return self.close()
if self.ws_connection:
# send message to client if connection exists
self.write_message(message, False)
def on_message(self, message, binary=False):
# type: (str, bool) -> Optional[Callable[..., Any]]
if not self.client:
# close websocket proxy connection if no connection with target websocket server
return self.close()
self.client.write_message(message, binary)
return None
def check_origin(self, origin):
# type: (str) -> bool
return True
def _add_request_headers(self, exclude_lower_headers_list=None):
# type: (Optional[List[str]]) -> httputil.HTTPHeaders
exclude_lower_headers_list = exclude_lower_headers_list or []
headers = httputil.HTTPHeaders()
for header, v in self.request.headers.get_all():
if header.lower() not in exclude_lower_headers_list:
headers.add(header, v)
return headers
class CombineHandler(BaseWebsocketHandler):
def get(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[Callable[..., Any]]
if self.request.headers.get("Upgrade", "").lower() == 'websocket':
return super().get(*args, **kwargs)
return None
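    # Stub out the remaining HTTP methods: prepare() below does the actual
    # proxying for every request, so these only need to exist for Tornado to
    # accept the corresponding verbs.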
def head(self):
# type: () -> None
pass
def post(self):
# type: () -> None
pass
def put(self):
# type: () -> None
pass
def patch(self):
# type: () -> None
pass
def options(self):
# type: () -> None
pass
def delete(self):
# type: () -> None
pass
def handle_response(self, response):
# type: (Any) -> None
if response.error and not isinstance(response.error, httpclient.HTTPError):
self.set_status(500)
self.write('Internal server error:\n' + str(response.error))
else:
self.set_status(response.code, response.reason)
            self._headers = httputil.HTTPHeaders()  # clear tornado's default headers
for header, v in response.headers.get_all():
if header != 'Content-Length':
                    # some headers appear multiple times, e.g. 'Set-Cookie'
self.add_header(header, v)
if response.body:
                # rewrite the Content-Length header based on the response body
self.set_header('Content-Length', len(response.body))
self.write(response.body)
self.finish()
@web.asynchronous
def prepare(self):
# type: () -> None
if 'X-REAL-IP' not in self.request.headers:
self.request.headers['X-REAL-IP'] = self.request.remote_ip
if self.request.headers.get("Upgrade", "").lower() == 'websocket':
return super().prepare()
url = transform_url(
self.request.protocol,
self.request.path,
self.request.query,
self.target_port,
self.target_host,
)
try:
fetch_request(
url=url,
callback=self.handle_response,
method=self.request.method,
headers=self._add_request_headers(["upgrade-insecure-requests"]),
follow_redirects=False,
body=getattr(self.request, 'body'),
allow_nonstandard_methods=True
)
except httpclient.HTTPError as e:
if hasattr(e, 'response') and e.response:
self.handle_response(e.response)
else:
self.set_status(500)
self.write('Internal server error:\n' + str(e))
self.finish()
class WebPackHandler(CombineHandler):
target_port = webpack_port
class DjangoHandler(CombineHandler):
target_port = django_port
class TornadoHandler(CombineHandler):
target_port = tornado_port
class ThumborHandler(CombineHandler):
target_port = thumbor_port
class Application(web.Application):
def __init__(self, enable_logging=False):
# type: (bool) -> None
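        # Tornado dispatches each request to the first handler whose pattern
        # matches, so the specific routes must come before the DjangoHandler
        # catch-all.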
handlers = [
(r"/json/events.*", TornadoHandler),
(r"/api/v1/events.*", TornadoHandler),
(r"/webpack.*", WebPackHandler),
(r"/sockjs.*", TornadoHandler),
(r"/thumbor.*", ThumborHandler),
(r"/.*", DjangoHandler)
]
super().__init__(handlers, enable_logging=enable_logging)
def log_request(self, handler):
# type: (BaseWebsocketHandler) -> None
if self.settings['enable_logging']:
super().log_request(handler)
def on_shutdown():
# type: () -> None
IOLoop.instance().stop()
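# Gracefully stop the IOLoop: while callbacks are still pending, retry after
# a second so in-flight work can drain before the loop exits.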
def shutdown_handler(*args, **kwargs):
# type: (*Any, **Any) -> None
io_loop = IOLoop.instance()
if io_loop._callbacks:
io_loop.call_later(1, shutdown_handler)
else:
io_loop.stop()
# log which services/ports will be started
print("Starting Zulip services on ports: web proxy: {},".format(proxy_port),
"Django: {}, Tornado: {}, Thumbor: {}".format(django_port, tornado_port, thumbor_port),
end='')
if options.test:
print("") # no webpack for --test
else:
print(", webpack: {}".format(webpack_port))
print("".join((WARNING,
"Note: only port {} is exposed to the host in a Vagrant environment.".format(
proxy_port), ENDC)))
try:
app = Application(enable_logging=options.enable_tornado_logging)
app.listen(proxy_port, address=options.interface)
ioloop = IOLoop.instance()
for s in (signal.SIGINT, signal.SIGTERM):
signal.signal(s, shutdown_handler)
ioloop.start()
except Exception:
# Print the traceback before we get SIGTERM and die.
traceback.print_exc()
raise
finally:
# Kill everything in our process group.
os.killpg(0, signal.SIGTERM)
# Remove pid file when development server closed correctly.
os.remove(pid_file_path)
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/setup/__init__.py | [] | [] | [] |
|
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/setup/emoji/emoji_names.py | from typing import Any, Dict
EMOJI_NAME_MAPS = {
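    # Each entry maps a Unicode codepoint (a lowercase hex string) to its
    # canonical_name plus any extra aliases; all of these names feed the
    # emoji typeahead.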
# seems like best emoji for happy
'1f600': {'canonical_name': 'grinning', 'aliases': ['happy']},
'1f603': {'canonical_name': 'smiley', 'aliases': []},
# the google emoji for this is not great, so made People/9 'smile' and
# renamed this one
'1f604': {'canonical_name': 'big_smile', 'aliases': []},
# from gemoji/unicode
'1f601': {'canonical_name': 'grinning_face_with_smiling_eyes', 'aliases': []},
# satisfied doesn't seem like a good description of these images
'1f606': {'canonical_name': 'laughing', 'aliases': ['lol']},
'1f605': {'canonical_name': 'sweat_smile', 'aliases': []},
# laughter_tears from https://beebom.com/emoji-meanings/
'1f602': {'canonical_name': 'joy', 'aliases': ['tears', 'laughter_tears']},
'1f923': {'canonical_name': 'rolling_on_the_floor_laughing', 'aliases': ['rofl']},
    # not sure how the glyphs match relaxed, but both iamcal and gemoji have it
'263a': {'canonical_name': 'smile', 'aliases': ['relaxed']},
'1f60a': {'canonical_name': 'blush', 'aliases': []},
# halo comes from gemoji/unicode
'1f607': {'canonical_name': 'innocent', 'aliases': ['halo']},
'1f642': {'canonical_name': 'slight_smile', 'aliases': []},
'1f643': {'canonical_name': 'upside_down', 'aliases': ['oops']},
'1f609': {'canonical_name': 'wink', 'aliases': []},
'1f60c': {'canonical_name': 'relieved', 'aliases': []},
# in_love from https://beebom.com/emoji-meanings/
'1f60d': {'canonical_name': 'heart_eyes', 'aliases': ['in_love']},
# blow_a_kiss from https://beebom.com/emoji-meanings/
'1f618': {'canonical_name': 'heart_kiss', 'aliases': ['blow_a_kiss']},
'1f617': {'canonical_name': 'kiss', 'aliases': []},
'1f619': {'canonical_name': 'kiss_smiling_eyes', 'aliases': []},
'1f61a': {'canonical_name': 'kiss_with_blush', 'aliases': []},
'1f60b': {'canonical_name': 'yum', 'aliases': []},
# crazy from https://beebom.com/emoji-meanings/, seems like best emoji for
# joking
'1f61c': {'canonical_name': 'stuck_out_tongue_wink', 'aliases': ['joking', 'crazy']},
'1f61d': {'canonical_name': 'stuck_out_tongue', 'aliases': []},
# don't really need two stuck_out_tongues (see People/23), so chose
# something else that could fit
'1f61b': {'canonical_name': 'mischievous', 'aliases': []},
# kaching suggested by user
'1f911': {'canonical_name': 'money_face', 'aliases': ['kaching']},
# arms_open seems like a natural addition
'1f917': {'canonical_name': 'hug', 'aliases': ['arms_open']},
'1f913': {'canonical_name': 'nerd', 'aliases': ['geek']},
# several sites suggested this was used for "cool", but cool is taken by
# Symbols/137
'1f60e': {'canonical_name': 'sunglasses', 'aliases': []},
'1f921': {'canonical_name': 'clown', 'aliases': []},
'1f920': {'canonical_name': 'cowboy', 'aliases': []},
# https://emojipedia.org/smirking-face/
'1f60f': {'canonical_name': 'smirk', 'aliases': ['smug']},
'1f612': {'canonical_name': 'unamused', 'aliases': []},
'1f61e': {'canonical_name': 'disappointed', 'aliases': []},
# see People/41
'1f614': {'canonical_name': 'pensive', 'aliases': ['tired']},
'1f61f': {'canonical_name': 'worried', 'aliases': []},
# these seem to better capture the glyphs. This is also what :/ turns into
# in google hangouts
'1f615': {'canonical_name': 'oh_no', 'aliases': ['half_frown', 'concerned', 'confused']},
'1f641': {'canonical_name': 'frown', 'aliases': ['slight_frown']},
# sad seemed better than putting another frown as the primary name (see
# People/37)
'2639': {'canonical_name': 'sad', 'aliases': ['big_frown']},
# helpless from https://emojipedia.org/persevering-face/
'1f623': {'canonical_name': 'persevere', 'aliases': ['helpless']},
# agony seemed like a good addition
'1f616': {'canonical_name': 'confounded', 'aliases': ['agony']},
# tired doesn't really match any of the 4 images, put it on People/34
'1f62b': {'canonical_name': 'anguish', 'aliases': []},
# distraught from https://beebom.com/emoji-meanings/
'1f629': {'canonical_name': 'weary', 'aliases': ['distraught']},
'1f624': {'canonical_name': 'triumph', 'aliases': []},
'1f620': {'canonical_name': 'angry', 'aliases': []},
# mad and grumpy from https://beebom.com/emoji-meanings/, very_angry to
# parallel People/44 and show up in typeahead for "ang.."
'1f621': {'canonical_name': 'rage', 'aliases': ['mad', 'grumpy', 'very_angry']},
# blank from https://beebom.com/emoji-meanings/, speechless and poker_face
# seemed like good ideas for this
'1f636': {'canonical_name': 'speechless', 'aliases': ['no_mouth', 'blank', 'poker_face']},
'1f610': {'canonical_name': 'neutral', 'aliases': []},
'1f611': {'canonical_name': 'expressionless', 'aliases': []},
'1f62f': {'canonical_name': 'hushed', 'aliases': []},
'1f626': {'canonical_name': 'frowning', 'aliases': []},
# pained from https://beebom.com/emoji-meanings/
'1f627': {'canonical_name': 'anguished', 'aliases': ['pained']},
# surprise from https://emojipedia.org/face-with-open-mouth/
'1f62e': {'canonical_name': 'open_mouth', 'aliases': ['surprise']},
'1f632': {'canonical_name': 'astonished', 'aliases': []},
'1f635': {'canonical_name': 'dizzy', 'aliases': []},
# the alternates are from https://emojipedia.org/flushed-face/. shame
# doesn't work with the google emoji
'1f633': {'canonical_name': 'flushed', 'aliases': ['embarrassed', 'blushing']},
'1f631': {'canonical_name': 'scream', 'aliases': []},
# scared from https://emojipedia.org/fearful-face/, shock seemed like a
# nice addition
'1f628': {'canonical_name': 'fear', 'aliases': ['scared', 'shock']},
'1f630': {'canonical_name': 'cold_sweat', 'aliases': []},
'1f622': {'canonical_name': 'cry', 'aliases': []},
# stressed from https://beebom.com/emoji-meanings/. The internet generally
    # didn't seem to know what to make of the disappointed_relieved name, and I
# got the sense it wasn't an emotion that was often used. Hence replaced it
# with exhausted.
'1f625': {'canonical_name': 'exhausted', 'aliases': ['disappointed_relieved', 'stressed']},
'1f924': {'canonical_name': 'drooling', 'aliases': []},
'1f62d': {'canonical_name': 'sob', 'aliases': []},
'1f613': {'canonical_name': 'sweat', 'aliases': []},
'1f62a': {'canonical_name': 'sleepy', 'aliases': []},
'1f634': {'canonical_name': 'sleeping', 'aliases': []},
'1f644': {'canonical_name': 'rolling_eyes', 'aliases': []},
'1f914': {'canonical_name': 'thinking', 'aliases': []},
'1f925': {'canonical_name': 'lying', 'aliases': []},
# seems like best emoji for nervous/anxious
'1f62c': {'canonical_name': 'grimacing', 'aliases': ['nervous', 'anxious']},
# zip_it from http://mashable.com/2015/10/23/ios-9-1-emoji-guide,
# lips_sealed from https://emojipedia.org/zipper-mouth-face/, rest seemed
# like reasonable additions
'1f910': {'canonical_name': 'silence', 'aliases': ['quiet', 'hush', 'zip_it', 'lips_are_sealed']},
# queasy seemed like a natural addition
'1f922': {'canonical_name': 'nauseated', 'aliases': ['queasy']},
'1f927': {'canonical_name': 'sneezing', 'aliases': []},
# cant_talk from https://beebom.com/emoji-meanings/
'1f637': {'canonical_name': 'cant_talk', 'aliases': ['mask']},
# flu from http://mashable.com/2015/10/23/ios-9-1-emoji-guide, sick from
# https://emojipedia.org/face-with-thermometer/, face_with_thermometer so
# it shows up in typeahead (thermometer taken by Objects/82)
'1f912': {'canonical_name': 'sick', 'aliases': ['flu', 'face_with_thermometer']},
# hurt and injured from https://beebom.com/emoji-meanings/. Chose hurt as
# primary since I think it can cover a wider set of things (e.g. emotional
# hurt)
'1f915': {'canonical_name': 'hurt', 'aliases': ['head_bandage', 'injured']},
# devil from https://emojipedia.org/smiling-face-with-horns/,
# smiling_face_with_horns from gemoji/unicode
'1f608': {'canonical_name': 'smiling_devil', 'aliases': ['smiling_imp', 'smiling_face_with_horns']},
# angry_devil from https://beebom.com/emoji-meanings/
'1f47f': {'canonical_name': 'devil', 'aliases': ['imp', 'angry_devil']},
'1f479': {'canonical_name': 'ogre', 'aliases': []},
'1f47a': {'canonical_name': 'goblin', 'aliases': []},
# pile_of_poo from gemoji/unicode
'1f4a9': {'canonical_name': 'poop', 'aliases': ['pile_of_poo']},
# alternates seemed like reasonable additions
'1f47b': {'canonical_name': 'ghost', 'aliases': ['boo', 'spooky', 'haunted']},
'1f480': {'canonical_name': 'skull', 'aliases': []},
# alternates seemed like reasonable additions
'2620': {'canonical_name': 'skull_and_crossbones', 'aliases': ['pirate', 'death', 'hazard', 'toxic', 'poison']}, # ignorelongline
# ufo seemed like a natural addition
'1f47d': {'canonical_name': 'alien', 'aliases': ['ufo']},
'1f47e': {'canonical_name': 'space_invader', 'aliases': []},
'1f916': {'canonical_name': 'robot', 'aliases': []},
# pumpkin seemed like a natural addition
'1f383': {'canonical_name': 'jack-o-lantern', 'aliases': ['pumpkin']},
'1f63a': {'canonical_name': 'smiley_cat', 'aliases': []},
'1f638': {'canonical_name': 'smile_cat', 'aliases': []},
'1f639': {'canonical_name': 'joy_cat', 'aliases': []},
'1f63b': {'canonical_name': 'heart_eyes_cat', 'aliases': []},
# smug_cat to parallel People/31
'1f63c': {'canonical_name': 'smirk_cat', 'aliases': ['smug_cat']},
'1f63d': {'canonical_name': 'kissing_cat', 'aliases': []},
# weary_cat from unicode/gemoji
'1f640': {'canonical_name': 'scream_cat', 'aliases': ['weary_cat']},
'1f63f': {'canonical_name': 'crying_cat', 'aliases': []},
# angry_cat to better parallel People/45
'1f63e': {'canonical_name': 'angry_cat', 'aliases': ['pouting_cat']},
'1f450': {'canonical_name': 'open_hands', 'aliases': []},
# praise from
# https://emojipedia.org/person-raising-both-hands-in-celebration/
'1f64c': {'canonical_name': 'raised_hands', 'aliases': ['praise']},
# applause from https://emojipedia.org/clapping-hands-sign/
'1f44f': {'canonical_name': 'clap', 'aliases': ['applause']},
# welcome and thank_you from
# https://emojipedia.org/person-with-folded-hands/, namaste from indian
# culture
'1f64f': {'canonical_name': 'pray', 'aliases': ['welcome', 'thank_you', 'namaste']},
# done_deal seems like a natural addition
'1f91d': {'canonical_name': 'handshake', 'aliases': ['done_deal']},
'1f44d': {'canonical_name': '+1', 'aliases': ['thumbs_up']},
'1f44e': {'canonical_name': '-1', 'aliases': ['thumbs_down']},
# fist_bump from https://beebom.com/emoji-meanings/
'1f44a': {'canonical_name': 'fist_bump', 'aliases': ['punch']},
# used as power in social justice movements
'270a': {'canonical_name': 'fist', 'aliases': ['power']},
'1f91b': {'canonical_name': 'left_fist', 'aliases': []},
'1f91c': {'canonical_name': 'right_fist', 'aliases': []},
'1f91e': {'canonical_name': 'fingers_crossed', 'aliases': []},
# seems to be mostly used as peace on twitter
'270c': {'canonical_name': 'peace_sign', 'aliases': ['victory']},
# https://emojipedia.org/sign-of-the-horns/
'1f918': {'canonical_name': 'rock_on', 'aliases': ['sign_of_the_horns']},
# got_it seems like a natural addition
'1f44c': {'canonical_name': 'ok', 'aliases': ['got_it']},
'1f448': {'canonical_name': 'point_left', 'aliases': []},
'1f449': {'canonical_name': 'point_right', 'aliases': []},
# :this: is a way of emphasizing the previous message. point_up instead of
# point_up_2 so that point_up better matches the other point_*s
'1f446': {'canonical_name': 'point_up', 'aliases': ['this']},
'1f447': {'canonical_name': 'point_down', 'aliases': []},
# People/114 is point_up. These seemed better than naming it point_up_2,
# and point_of_information means it will come up in typeahead for 'point'
'261d': {'canonical_name': 'wait_one_second', 'aliases': ['point_of_information', 'asking_a_question']},
'270b': {'canonical_name': 'hand', 'aliases': ['raised_hand']},
# seems like best emoji for stop, raised_back_of_hand doesn't seem that
# useful
'1f91a': {'canonical_name': 'stop', 'aliases': []},
# seems like best emoji for high_five, raised_hand_with_fingers_splayed
# doesn't seem that useful
'1f590': {'canonical_name': 'high_five', 'aliases': ['palm']},
# http://mashable.com/2015/10/23/ios-9-1-emoji-guide
'1f596': {'canonical_name': 'spock', 'aliases': ['live_long_and_prosper']},
# People/119 is a better 'hi', but 'hi' will never show up in the typeahead
# due to 'high_five'
'1f44b': {'canonical_name': 'wave', 'aliases': ['hello', 'hi']},
'1f919': {'canonical_name': 'call_me', 'aliases': []},
# flexed_biceps from gemoji/unicode, strong seemed like a good addition
'1f4aa': {'canonical_name': 'muscle', 'aliases': []},
'1f595': {'canonical_name': 'middle_finger', 'aliases': []},
'270d': {'canonical_name': 'writing', 'aliases': []},
'1f933': {'canonical_name': 'selfie', 'aliases': []},
# Couldn't figure out why iamcal chose nail_care. unicode uses nail_polish,
# gemoji uses both
'1f485': {'canonical_name': 'nail_polish', 'aliases': ['nail_care']},
'1f48d': {'canonical_name': 'ring', 'aliases': []},
'1f484': {'canonical_name': 'lipstick', 'aliases': []},
# People/18 seems like a better kiss for most circumstances
'1f48b': {'canonical_name': 'lipstick_kiss', 'aliases': []},
# mouth from gemoji/unicode
'1f444': {'canonical_name': 'lips', 'aliases': ['mouth']},
'1f445': {'canonical_name': 'tongue', 'aliases': []},
'1f442': {'canonical_name': 'ear', 'aliases': []},
'1f443': {'canonical_name': 'nose', 'aliases': []},
# seems a better feet than Nature/86 (paw_prints)
'1f463': {'canonical_name': 'footprints', 'aliases': ['feet']},
'1f441': {'canonical_name': 'eye', 'aliases': []},
# seemed the best emoji for looking
'1f440': {'canonical_name': 'eyes', 'aliases': ['looking']},
'1f5e3': {'canonical_name': 'speaking_head', 'aliases': []},
# shadow seems like a good addition
'1f464': {'canonical_name': 'silhouette', 'aliases': ['shadow']},
# to parallel People/139
'1f465': {'canonical_name': 'silhouettes', 'aliases': ['shadows']},
'1f476': {'canonical_name': 'baby', 'aliases': []},
'1f466': {'canonical_name': 'boy', 'aliases': []},
'1f467': {'canonical_name': 'girl', 'aliases': []},
'1f468': {'canonical_name': 'man', 'aliases': []},
'1f469': {'canonical_name': 'woman', 'aliases': []},
# It's used on twitter a bunch, either when showing off hair, or in a way
# where People/144 would substitute. It'd be nice if there were another
# emoji one could use for "good hair", but I think not a big loss to not
# have one for zulip, and not worth the eurocentrism.
# '1f471': {'canonical_name': 'X', 'aliases': ['person_with_blond_hair']},
# Added elderly since I think some people prefer that term
'1f474': {'canonical_name': 'older_man', 'aliases': ['elderly_man']},
# Added elderly since I think some people prefer that term
'1f475': {'canonical_name': 'older_woman', 'aliases': ['elderly_woman']},
'1f472': {'canonical_name': 'gua_pi_mao', 'aliases': []},
'1f473': {'canonical_name': 'turban', 'aliases': []},
# police seems like a more polite term, and matches the unicode
'1f46e': {'canonical_name': 'police', 'aliases': ['cop']},
'1f477': {'canonical_name': 'construction_worker', 'aliases': []},
'1f482': {'canonical_name': 'guard', 'aliases': []},
# detective from gemoji, sneaky from
# http://mashable.com/2015/10/23/ios-9-1-emoji-guide/, agent seems a
# reasonable addition
'1f575': {'canonical_name': 'detective', 'aliases': ['spy', 'sleuth', 'agent', 'sneaky']},
# mrs_claus from https://emojipedia.org/mother-christmas/
'1f936': {'canonical_name': 'mother_christmas', 'aliases': ['mrs_claus']},
'1f385': {'canonical_name': 'santa', 'aliases': []},
'1f478': {'canonical_name': 'princess', 'aliases': []},
'1f934': {'canonical_name': 'prince', 'aliases': []},
'1f470': {'canonical_name': 'bride', 'aliases': []},
'1f935': {'canonical_name': 'tuxedo', 'aliases': []},
'1f47c': {'canonical_name': 'angel', 'aliases': []},
# expecting seems like a good addition
'1f930': {'canonical_name': 'pregnant', 'aliases': ['expecting']},
'1f647': {'canonical_name': 'bow', 'aliases': []},
# mostly used sassily. person_tipping_hand from
# https://emojipedia.org/information-desk-person/
'1f481': {'canonical_name': 'information_desk_person', 'aliases': ['person_tipping_hand']},
# no_signal to parallel People/207. Nope seems like a reasonable addition
'1f645': {'canonical_name': 'no_signal', 'aliases': ['nope']},
'1f646': {'canonical_name': 'ok_signal', 'aliases': []},
# pick_me seems like a good addition
'1f64b': {'canonical_name': 'raising_hand', 'aliases': ['pick_me']},
'1f926': {'canonical_name': 'face_palm', 'aliases': []},
'1f937': {'canonical_name': 'shrug', 'aliases': []},
'1f64e': {'canonical_name': 'person_pouting', 'aliases': []},
'1f64d': {'canonical_name': 'person_frowning', 'aliases': []},
'1f487': {'canonical_name': 'haircut', 'aliases': []},
'1f486': {'canonical_name': 'massage', 'aliases': []},
# hover seems like a reasonable addition
'1f574': {'canonical_name': 'levitating', 'aliases': ['hover']},
'1f483': {'canonical_name': 'dancer', 'aliases': []},
'1f57a': {'canonical_name': 'dancing', 'aliases': ['disco']},
'1f46f': {'canonical_name': 'dancers', 'aliases': []},
# pedestrian seems like reasonable addition
'1f6b6': {'canonical_name': 'walking', 'aliases': ['pedestrian']},
'1f3c3': {'canonical_name': 'running', 'aliases': ['runner']},
'1f46b': {'canonical_name': 'man_and_woman_holding_hands', 'aliases': ['man_and_woman_couple']},
# to parallel People/234
'1f46d': {'canonical_name': 'two_women_holding_hands', 'aliases': ['women_couple']},
# to parallel People/234
'1f46c': {'canonical_name': 'two_men_holding_hands', 'aliases': ['men_couple']},
# no need for man-woman-boy, since we aren't including the other family
# combos
'1f46a': {'canonical_name': 'family', 'aliases': []},
'1f45a': {'canonical_name': 'clothing', 'aliases': []},
'1f455': {'canonical_name': 'shirt', 'aliases': ['tshirt']},
# denim seems like a good addition
'1f456': {'canonical_name': 'jeans', 'aliases': ['denim']},
# tie is shorter, and a bit more general
'1f454': {'canonical_name': 'tie', 'aliases': []},
'1f457': {'canonical_name': 'dress', 'aliases': []},
'1f459': {'canonical_name': 'bikini', 'aliases': []},
'1f458': {'canonical_name': 'kimono', 'aliases': []},
# I feel like this is always used in the plural
'1f460': {'canonical_name': 'high_heels', 'aliases': []},
# flip_flops seems like a reasonable addition
'1f461': {'canonical_name': 'sandal', 'aliases': ['flip_flops']},
'1f462': {'canonical_name': 'boot', 'aliases': []},
'1f45e': {'canonical_name': 'shoe', 'aliases': []},
# running_shoe is from gemoji, sneaker seems like a reasonable addition
'1f45f': {'canonical_name': 'athletic_shoe', 'aliases': ['sneaker', 'running_shoe']},
'1f452': {'canonical_name': 'hat', 'aliases': []},
'1f3a9': {'canonical_name': 'top_hat', 'aliases': []},
# graduate seems like a better word for this
'1f393': {'canonical_name': 'graduate', 'aliases': ['mortar_board']},
# king and queen seem like good additions
'1f451': {'canonical_name': 'crown', 'aliases': ['queen', 'king']},
# safety and invincibility inspired by
# http://mashable.com/2015/10/23/ios-9-1-emoji-guide. hard_hat and
# rescue_worker seem like good additions
'26d1': {'canonical_name': 'helmet', 'aliases': ['hard_hat', 'rescue_worker', 'safety_first', 'invincible']}, # ignorelongline
# backpack from gemoji, dominates satchel on google trends
'1f392': {'canonical_name': 'backpack', 'aliases': ['satchel']},
'1f45d': {'canonical_name': 'pouch', 'aliases': []},
'1f45b': {'canonical_name': 'purse', 'aliases': []},
'1f45c': {'canonical_name': 'handbag', 'aliases': []},
'1f4bc': {'canonical_name': 'briefcase', 'aliases': []},
# glasses seems a more common term than eyeglasses, spectacles seems like a
# reasonable synonym to add
'1f453': {'canonical_name': 'glasses', 'aliases': ['spectacles']},
'1f576': {'canonical_name': 'dark_sunglasses', 'aliases': []},
'1f302': {'canonical_name': 'closed_umbrella', 'aliases': []},
'2602': {'canonical_name': 'umbrella', 'aliases': []},
# Some animals have a unicode codepoint "<animal>", some have a codepoint
# "<animal> face", and some have both. If an animal has just a single
# codepoint, we call it <animal>, regardless of what the codepoint is. If
# an animal has both, we call the "<animal>" codepoint <animal>, and come
# up with something else useful-seeming for the "<animal> face" codepoint.
# The reason we chose "<animal> face" for the non-standard name (instead of
    # giving "<animal>" the non-standard name, as iamcal does) is that the
# apple emoji for the "<animal>"s are too realistic. E.g. Apple's Nature/76
# is less plausibly a puppy than this one.
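    # For example, 1f436 (dog face) below is named 'puppy', while 1f415 (dog)
    # further down keeps the plain name 'dog'.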
'1f436': {'canonical_name': 'puppy', 'aliases': []},
'1f431': {'canonical_name': 'kitten', 'aliases': []},
'1f42d': {'canonical_name': 'dormouse', 'aliases': []},
'1f439': {'canonical_name': 'hamster', 'aliases': []},
'1f430': {'canonical_name': 'bunny', 'aliases': []},
'1f98a': {'canonical_name': 'fox', 'aliases': []},
'1f43b': {'canonical_name': 'bear', 'aliases': []},
'1f43c': {'canonical_name': 'panda', 'aliases': []},
'1f428': {'canonical_name': 'koala', 'aliases': []},
'1f42f': {'canonical_name': 'tiger_cub', 'aliases': []},
'1f981': {'canonical_name': 'lion', 'aliases': []},
'1f42e': {'canonical_name': 'calf', 'aliases': []},
'1f437': {'canonical_name': 'piglet', 'aliases': []},
'1f43d': {'canonical_name': 'pig_nose', 'aliases': []},
'1f438': {'canonical_name': 'frog', 'aliases': []},
'1f435': {'canonical_name': 'monkey_face', 'aliases': []},
'1f648': {'canonical_name': 'see_no_evil', 'aliases': []},
'1f649': {'canonical_name': 'hear_no_evil', 'aliases': []},
'1f64a': {'canonical_name': 'speak_no_evil', 'aliases': []},
'1f412': {'canonical_name': 'monkey', 'aliases': []},
# cluck seemed like a good addition
'1f414': {'canonical_name': 'chicken', 'aliases': ['cluck']},
'1f427': {'canonical_name': 'penguin', 'aliases': []},
'1f426': {'canonical_name': 'bird', 'aliases': []},
'1f424': {'canonical_name': 'chick', 'aliases': ['baby_chick']},
'1f423': {'canonical_name': 'hatching', 'aliases': ['hatching_chick']},
# http://www.iemoji.com/view/emoji/668/animals-nature/front-facing-baby-chick
'1f425': {'canonical_name': 'new_baby', 'aliases': []},
'1f986': {'canonical_name': 'duck', 'aliases': []},
'1f985': {'canonical_name': 'eagle', 'aliases': []},
'1f989': {'canonical_name': 'owl', 'aliases': []},
'1f987': {'canonical_name': 'bat', 'aliases': []},
'1f43a': {'canonical_name': 'wolf', 'aliases': []},
'1f417': {'canonical_name': 'boar', 'aliases': []},
'1f434': {'canonical_name': 'pony', 'aliases': []},
'1f984': {'canonical_name': 'unicorn', 'aliases': []},
# buzz seemed like a reasonable addition
'1f41d': {'canonical_name': 'bee', 'aliases': ['buzz', 'honeybee']},
# caterpillar seemed like a reasonable addition
'1f41b': {'canonical_name': 'bug', 'aliases': ['caterpillar']},
'1f98b': {'canonical_name': 'butterfly', 'aliases': []},
'1f40c': {'canonical_name': 'snail', 'aliases': []},
# spiral_shell from unicode/gemoji, the others seemed like reasonable
# additions
'1f41a': {'canonical_name': 'shell', 'aliases': ['seashell', 'conch', 'spiral_shell']},
# unicode/gemoji have lady_beetle; hopefully with ladybug we get both the
# people that prefer lady_beetle (with beetle) and ladybug. There is also
# ladybird, but seems a bit much for this to complete for bird.
'1f41e': {'canonical_name': 'beetle', 'aliases': ['ladybug']},
'1f41c': {'canonical_name': 'ant', 'aliases': []},
'1f577': {'canonical_name': 'spider', 'aliases': []},
'1f578': {'canonical_name': 'web', 'aliases': ['spider_web']},
# tortoise seemed like a reasonable addition
'1f422': {'canonical_name': 'turtle', 'aliases': ['tortoise']},
# put in a few animal sounds, including this one
'1f40d': {'canonical_name': 'snake', 'aliases': ['hiss']},
'1f98e': {'canonical_name': 'lizard', 'aliases': ['gecko']},
'1f982': {'canonical_name': 'scorpion', 'aliases': []},
'1f980': {'canonical_name': 'crab', 'aliases': []},
'1f991': {'canonical_name': 'squid', 'aliases': []},
'1f419': {'canonical_name': 'octopus', 'aliases': []},
'1f990': {'canonical_name': 'shrimp', 'aliases': []},
'1f420': {'canonical_name': 'tropical_fish', 'aliases': []},
'1f41f': {'canonical_name': 'fish', 'aliases': []},
'1f421': {'canonical_name': 'blowfish', 'aliases': []},
'1f42c': {'canonical_name': 'dolphin', 'aliases': ['flipper']},
'1f988': {'canonical_name': 'shark', 'aliases': []},
'1f433': {'canonical_name': 'whale', 'aliases': []},
# https://emojipedia.org/whale/
'1f40b': {'canonical_name': 'humpback_whale', 'aliases': []},
'1f40a': {'canonical_name': 'crocodile', 'aliases': []},
'1f406': {'canonical_name': 'leopard', 'aliases': []},
'1f405': {'canonical_name': 'tiger', 'aliases': []},
'1f403': {'canonical_name': 'water_buffalo', 'aliases': []},
'1f402': {'canonical_name': 'ox', 'aliases': ['bull']},
'1f404': {'canonical_name': 'cow', 'aliases': []},
'1f98c': {'canonical_name': 'deer', 'aliases': []},
# https://emojipedia.org/dromedary-camel/
'1f42a': {'canonical_name': 'arabian_camel', 'aliases': []},
'1f42b': {'canonical_name': 'camel', 'aliases': []},
'1f418': {'canonical_name': 'elephant', 'aliases': []},
'1f98f': {'canonical_name': 'rhinoceros', 'aliases': []},
'1f98d': {'canonical_name': 'gorilla', 'aliases': []},
'1f40e': {'canonical_name': 'horse', 'aliases': []},
'1f416': {'canonical_name': 'pig', 'aliases': ['oink']},
'1f410': {'canonical_name': 'goat', 'aliases': []},
'1f40f': {'canonical_name': 'ram', 'aliases': []},
'1f411': {'canonical_name': 'sheep', 'aliases': ['baa']},
'1f415': {'canonical_name': 'dog', 'aliases': ['woof']},
'1f429': {'canonical_name': 'poodle', 'aliases': []},
'1f408': {'canonical_name': 'cat', 'aliases': ['meow']},
# alarm seemed like a fun addition
'1f413': {'canonical_name': 'rooster', 'aliases': ['alarm', 'cock-a-doodle-doo']},
'1f983': {'canonical_name': 'turkey', 'aliases': []},
'1f54a': {'canonical_name': 'dove', 'aliases': ['dove_of_peace']},
'1f407': {'canonical_name': 'rabbit', 'aliases': []},
'1f401': {'canonical_name': 'mouse', 'aliases': []},
'1f400': {'canonical_name': 'rat', 'aliases': []},
'1f43f': {'canonical_name': 'chipmunk', 'aliases': []},
# paws seemed like reasonable addition. Put feet at People/135
'1f43e': {'canonical_name': 'paw_prints', 'aliases': ['paws']},
'1f409': {'canonical_name': 'dragon', 'aliases': []},
'1f432': {'canonical_name': 'dragon_face', 'aliases': []},
'1f335': {'canonical_name': 'cactus', 'aliases': []},
'1f384': {'canonical_name': 'holiday_tree', 'aliases': []},
'1f332': {'canonical_name': 'evergreen_tree', 'aliases': []},
'1f333': {'canonical_name': 'tree', 'aliases': ['deciduous_tree']},
'1f334': {'canonical_name': 'palm_tree', 'aliases': []},
# sprout seemed like a reasonable addition
'1f331': {'canonical_name': 'seedling', 'aliases': ['sprout']},
# seemed like the best emoji for plant
'1f33f': {'canonical_name': 'herb', 'aliases': ['plant']},
# clover seemed like a reasonable addition
'2618': {'canonical_name': 'shamrock', 'aliases': ['clover']},
# lucky seems more useful
'1f340': {'canonical_name': 'lucky', 'aliases': ['four_leaf_clover']},
'1f38d': {'canonical_name': 'bamboo', 'aliases': []},
# https://emojipedia.org/tanabata-tree/
'1f38b': {'canonical_name': 'wish_tree', 'aliases': ['tanabata_tree']},
# seemed like good additions. Used fall instead of autumn, since don't have
# the rest of the seasons, and could imagine someone using both meanings of
# fall.
'1f343': {'canonical_name': 'leaves', 'aliases': ['wind', 'fall']},
'1f342': {'canonical_name': 'fallen_leaf', 'aliases': []},
'1f341': {'canonical_name': 'maple_leaf', 'aliases': []},
'1f344': {'canonical_name': 'mushroom', 'aliases': []},
# harvest seems more useful
'1f33e': {'canonical_name': 'harvest', 'aliases': ['ear_of_rice']},
'1f490': {'canonical_name': 'bouquet', 'aliases': []},
# seems like the best emoji for flower
'1f337': {'canonical_name': 'tulip', 'aliases': ['flower']},
'1f339': {'canonical_name': 'rose', 'aliases': []},
    # crushed suggested by a user
'1f940': {'canonical_name': 'wilted_flower', 'aliases': ['crushed']},
'1f33b': {'canonical_name': 'sunflower', 'aliases': []},
'1f33c': {'canonical_name': 'blossom', 'aliases': []},
'1f338': {'canonical_name': 'cherry_blossom', 'aliases': []},
'1f33a': {'canonical_name': 'hibiscus', 'aliases': []},
'1f30e': {'canonical_name': 'earth_americas', 'aliases': []},
'1f30d': {'canonical_name': 'earth_africa', 'aliases': []},
'1f30f': {'canonical_name': 'earth_asia', 'aliases': []},
'1f315': {'canonical_name': 'full_moon', 'aliases': []},
# too many useless moons. Don't seem to get much use on twitter, and clog
# up typeahead for moon.
# '1f316': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
# '1f317': {'canonical_name': 'X', 'aliases': ['last_quarter_moon']},
# '1f318': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
'1f311': {'canonical_name': 'new_moon', 'aliases': []},
# '1f312': {'canonical_name': 'X', 'aliases': ['waxing_crescent_moon']},
# '1f313': {'canonical_name': 'X', 'aliases': ['first_quarter_moon']},
'1f314': {'canonical_name': 'waxing_moon', 'aliases': []},
'1f31a': {'canonical_name': 'new_moon_face', 'aliases': []},
'1f31d': {'canonical_name': 'moon_face', 'aliases': []},
'1f31e': {'canonical_name': 'sun_face', 'aliases': []},
# goodnight seems way more useful
'1f31b': {'canonical_name': 'goodnight', 'aliases': []},
# '1f31c': {'canonical_name': 'X', 'aliases': ['last_quarter_moon_with_face']},
# seems like the best emoji for moon
'1f319': {'canonical_name': 'moon', 'aliases': []},
# dizzy taken by People/54, had to come up with something else
'1f4ab': {'canonical_name': 'seeing_stars', 'aliases': []},
'2b50': {'canonical_name': 'star', 'aliases': []},
# glowing_star from gemoji/unicode
'1f31f': {'canonical_name': 'glowing_star', 'aliases': []},
# glamour seems like a reasonable addition
'2728': {'canonical_name': 'sparkles', 'aliases': ['glamour']},
# high_voltage from gemoji/unicode
'26a1': {'canonical_name': 'high_voltage', 'aliases': ['zap']},
# https://emojipedia.org/fire/
'1f525': {'canonical_name': 'fire', 'aliases': ['lit', 'hot', 'flame']},
# explosion and crash seem like reasonable additions
'1f4a5': {'canonical_name': 'boom', 'aliases': ['explosion', 'crash', 'collision']},
# meteor seems like a reasonable addition
'2604': {'canonical_name': 'comet', 'aliases': ['meteor']},
'2600': {'canonical_name': 'sunny', 'aliases': []},
'1f324': {'canonical_name': 'mostly_sunny', 'aliases': []},
# partly_cloudy for the glass half empty people
'26c5': {'canonical_name': 'partly_sunny', 'aliases': ['partly_cloudy']},
'1f325': {'canonical_name': 'cloudy', 'aliases': []},
# sunshowers seems like a more fun term
'1f326': {'canonical_name': 'sunshowers', 'aliases': ['sun_and_rain', 'partly_sunny_with_rain']},
# pride and lgbtq seem like reasonable additions
'1f308': {'canonical_name': 'rainbow', 'aliases': ['pride', 'lgbtq']},
# overcast seems like a good addition
'2601': {'canonical_name': 'cloud', 'aliases': ['overcast']},
    # suggested by a user typing these into their typeahead.
'1f327': {'canonical_name': 'rainy', 'aliases': ['soaked', 'drenched']},
# thunderstorm seems better for this emoji, and thunder_and_rain more
# evocative than thunder_cloud_and_rain
'26c8': {'canonical_name': 'thunderstorm', 'aliases': ['thunder_and_rain']},
# lightning_storm seemed better than lightning_cloud
'1f329': {'canonical_name': 'lightning', 'aliases': ['lightning_storm']},
# snowy to parallel sunny, cloudy, etc; snowstorm seems like a good
# addition
'1f328': {'canonical_name': 'snowy', 'aliases': ['snowstorm']},
'2603': {'canonical_name': 'snowman', 'aliases': []},
# don't need two snowmen. frosty is nice because it's a weather (primary
# benefit) and also a snowman (one that suffered from not having snow, in
# fact)
'26c4': {'canonical_name': 'frosty', 'aliases': []},
'2744': {'canonical_name': 'snowflake', 'aliases': []},
# the internet didn't seem to have a good use for this emoji. windy is a
# good weather that is otherwise not represented. mother_nature from
# https://emojipedia.org/wind-blowing-face/
'1f32c': {'canonical_name': 'windy', 'aliases': ['mother_nature']},
'1f4a8': {'canonical_name': 'dash', 'aliases': []},
# tornado_cloud comes from the unicode, but e.g. gemoji drops the cloud
'1f32a': {'canonical_name': 'tornado', 'aliases': []},
# hazy seemed like a good addition
'1f32b': {'canonical_name': 'fog', 'aliases': ['hazy']},
'1f30a': {'canonical_name': 'ocean', 'aliases': []},
# drop seems better than droplet, since could be used for its other
# meanings. water drop partly so that it shows up in typeahead for water
'1f4a7': {'canonical_name': 'drop', 'aliases': ['water_drop']},
'1f4a6': {'canonical_name': 'sweat_drops', 'aliases': []},
'2614': {'canonical_name': 'umbrella_with_rain', 'aliases': []},
'1f34f': {'canonical_name': 'green_apple', 'aliases': []},
'1f34e': {'canonical_name': 'apple', 'aliases': []},
'1f350': {'canonical_name': 'pear', 'aliases': []},
# An argument for not calling this orange is to save the color for a color
# swatch, but we can deal with that when it happens. Mandarin is from
# https://emojipedia.org/tangerine/, also like that it has a second meaning
'1f34a': {'canonical_name': 'orange', 'aliases': ['tangerine', 'mandarin']},
'1f34b': {'canonical_name': 'lemon', 'aliases': []},
'1f34c': {'canonical_name': 'banana', 'aliases': []},
'1f349': {'canonical_name': 'watermelon', 'aliases': []},
'1f347': {'canonical_name': 'grapes', 'aliases': []},
'1f353': {'canonical_name': 'strawberry', 'aliases': []},
'1f348': {'canonical_name': 'melon', 'aliases': []},
'1f352': {'canonical_name': 'cherries', 'aliases': []},
'1f351': {'canonical_name': 'peach', 'aliases': []},
'1f34d': {'canonical_name': 'pineapple', 'aliases': []},
'1f95d': {'canonical_name': 'kiwi', 'aliases': []},
'1f951': {'canonical_name': 'avocado', 'aliases': []},
'1f345': {'canonical_name': 'tomato', 'aliases': []},
'1f346': {'canonical_name': 'eggplant', 'aliases': []},
'1f952': {'canonical_name': 'cucumber', 'aliases': []},
'1f955': {'canonical_name': 'carrot', 'aliases': []},
# maize is from unicode
'1f33d': {'canonical_name': 'corn', 'aliases': ['maize']},
# chili_pepper seems like a reasonable addition
'1f336': {'canonical_name': 'hot_pepper', 'aliases': ['chili_pepper']},
'1f954': {'canonical_name': 'potato', 'aliases': []},
# yam seems better than sweet_potato, since we already have a potato (not a
# strong argument, but is better on the typeahead not to have emoji that
# share long prefixes)
'1f360': {'canonical_name': 'yam', 'aliases': ['sweet_potato']},
'1f330': {'canonical_name': 'chestnut', 'aliases': []},
'1f95c': {'canonical_name': 'peanuts', 'aliases': []},
'1f36f': {'canonical_name': 'honey', 'aliases': []},
'1f950': {'canonical_name': 'croissant', 'aliases': []},
'1f35e': {'canonical_name': 'bread', 'aliases': []},
'1f956': {'canonical_name': 'baguette', 'aliases': []},
'1f9c0': {'canonical_name': 'cheese', 'aliases': []},
'1f95a': {'canonical_name': 'egg', 'aliases': []},
# already have an egg in Foods/31, though I guess wouldn't be a big deal to
# add it here.
'1f373': {'canonical_name': 'cooking', 'aliases': []},
'1f953': {'canonical_name': 'bacon', 'aliases': []},
# there's no lunch and dinner, which is a small negative against adding
# breakfast
'1f95e': {'canonical_name': 'pancakes', 'aliases': ['breakfast']},
# There is already shrimp in Nature/51, and tempura seems like a better
# description
'1f364': {'canonical_name': 'tempura', 'aliases': []},
# drumstick seems like a better description
'1f357': {'canonical_name': 'drumstick', 'aliases': ['poultry']},
'1f356': {'canonical_name': 'meat', 'aliases': []},
'1f355': {'canonical_name': 'pizza', 'aliases': []},
'1f32d': {'canonical_name': 'hotdog', 'aliases': []},
'1f354': {'canonical_name': 'hamburger', 'aliases': []},
'1f35f': {'canonical_name': 'fries', 'aliases': []},
# https://emojipedia.org/stuffed-flatbread/
'1f959': {'canonical_name': 'doner_kebab', 'aliases': ['shawarma', 'souvlaki', 'stuffed_flatbread']},
'1f32e': {'canonical_name': 'taco', 'aliases': []},
'1f32f': {'canonical_name': 'burrito', 'aliases': []},
'1f957': {'canonical_name': 'salad', 'aliases': []},
# I think Foods/49 is a better :food:
'1f958': {'canonical_name': 'paella', 'aliases': []},
'1f35d': {'canonical_name': 'spaghetti', 'aliases': []},
# seems like the best noodles? maybe this should be Foods/47? Noodles seem
# like a bigger thing in east asia than in europe, so going with that.
'1f35c': {'canonical_name': 'ramen', 'aliases': ['noodles']},
# seems like the best :food:. Also a reasonable :soup:, though the google
# one is indeed more a pot of food (the unicode) than a soup
'1f372': {'canonical_name': 'food', 'aliases': ['soup', 'stew']},
# naruto is actual name, and I think don't need this to autocomplete for
# "fish"
'1f365': {'canonical_name': 'naruto', 'aliases': []},
'1f363': {'canonical_name': 'sushi', 'aliases': []},
'1f371': {'canonical_name': 'bento', 'aliases': []},
'1f35b': {'canonical_name': 'curry', 'aliases': []},
'1f35a': {'canonical_name': 'rice', 'aliases': []},
# onigiri is actual name, and I think don't need this to typeahead complete
# for "rice"
'1f359': {'canonical_name': 'onigiri', 'aliases': []},
# leaving rice_cracker in, so that we have something for cracker
'1f358': {'canonical_name': 'senbei', 'aliases': ['rice_cracker']},
'1f362': {'canonical_name': 'oden', 'aliases': []},
'1f361': {'canonical_name': 'dango', 'aliases': []},
'1f367': {'canonical_name': 'shaved_ice', 'aliases': []},
# seemed like the best emoji for gelato
'1f368': {'canonical_name': 'ice_cream', 'aliases': ['gelato']},
# already have ice_cream in Foods/60, and soft_serve seems like a
# potentially fun emoji to have in conjunction with ice_cream. Put in
# soft_ice_cream so it typeahead completes on ice_cream as well.
'1f366': {'canonical_name': 'soft_serve', 'aliases': ['soft_ice_cream']},
'1f370': {'canonical_name': 'cake', 'aliases': []},
'1f382': {'canonical_name': 'birthday', 'aliases': []},
# flan seems like a reasonable addition
'1f36e': {'canonical_name': 'custard', 'aliases': ['flan']},
'1f36d': {'canonical_name': 'lollipop', 'aliases': []},
'1f36c': {'canonical_name': 'candy', 'aliases': []},
'1f36b': {'canonical_name': 'chocolate', 'aliases': []},
'1f37f': {'canonical_name': 'popcorn', 'aliases': []},
# donut dominates doughnut on
# https://trends.google.com/trends/explore?q=doughnut,donut
'1f369': {'canonical_name': 'donut', 'aliases': ['doughnut']},
'1f36a': {'canonical_name': 'cookie', 'aliases': []},
'1f95b': {'canonical_name': 'milk', 'aliases': ['glass_of_milk']},
'1f37c': {'canonical_name': 'baby_bottle', 'aliases': []},
'2615': {'canonical_name': 'coffee', 'aliases': []},
'1f375': {'canonical_name': 'tea', 'aliases': []},
'1f376': {'canonical_name': 'sake', 'aliases': []},
'1f37a': {'canonical_name': 'beer', 'aliases': []},
'1f37b': {'canonical_name': 'beers', 'aliases': []},
'1f942': {'canonical_name': 'clink', 'aliases': ['toast']},
'1f377': {'canonical_name': 'wine', 'aliases': []},
# tumbler means something different in india, and don't want to use
# shot_glass given our policy of using school-age-appropriate terms
'1f943': {'canonical_name': 'small_glass', 'aliases': []},
'1f378': {'canonical_name': 'cocktail', 'aliases': []},
'1f379': {'canonical_name': 'tropical_drink', 'aliases': []},
'1f37e': {'canonical_name': 'champagne', 'aliases': []},
'1f944': {'canonical_name': 'spoon', 'aliases': []},
# Added eating_utensils so this would show up in typeahead for eat.
'1f374': {'canonical_name': 'fork_and_knife', 'aliases': ['eating_utensils']},
# Seems like the best emoji for hungry and meal. fork_and_knife_and_plate
# is from gemoji/unicode, and I think is better than the shorter iamcal
# version in this case. The rest just seemed like good additions.
'1f37d': {'canonical_name': 'hungry', 'aliases': ['meal', 'table_setting', 'fork_and_knife_with_plate', 'lets_eat']}, # ignorelongline
# most people interested in this sport call it football
'26bd': {'canonical_name': 'football', 'aliases': ['soccer']},
'1f3c0': {'canonical_name': 'basketball', 'aliases': []},
# to distinguish from Activity/1, but is also the unicode name
'1f3c8': {'canonical_name': 'american_football', 'aliases': []},
'26be': {'canonical_name': 'baseball', 'aliases': []},
'1f3be': {'canonical_name': 'tennis', 'aliases': []},
'1f3d0': {'canonical_name': 'volleyball', 'aliases': []},
'1f3c9': {'canonical_name': 'rugby', 'aliases': []},
# https://emojipedia.org/billiards/ suggests this is actually used for
# billiards, not for "unlucky" or "losing" or some other connotation of
# 8ball. The unicode name is billiards.
'1f3b1': {'canonical_name': 'billiards', 'aliases': ['pool', '8_ball']},
# ping pong is the unicode name, and seems slightly more popular on
# https://trends.google.com/trends/explore?q=table%20tennis,ping%20pong
'1f3d3': {'canonical_name': 'ping_pong', 'aliases': ['table_tennis']},
'1f3f8': {'canonical_name': 'badminton', 'aliases': []},
    # gooooooooal seems a more useful name, though arguably this isn't the
    # best emoji for it
'1f945': {'canonical_name': 'gooooooooal', 'aliases': ['goal']},
'1f3d2': {'canonical_name': 'ice_hockey', 'aliases': []},
'1f3d1': {'canonical_name': 'field_hockey', 'aliases': []},
# would say bat, but taken by Nature/30
'1f3cf': {'canonical_name': 'cricket', 'aliases': ['cricket_bat']},
# hole_in_one seems like a more useful name to have. Sent golf to
# Activity/39
'26f3': {'canonical_name': 'hole_in_one', 'aliases': []},
# archery seems like a reasonable addition
'1f3f9': {'canonical_name': 'bow_and_arrow', 'aliases': ['archery']},
'1f3a3': {'canonical_name': 'fishing', 'aliases': []},
'1f94a': {'canonical_name': 'boxing_glove', 'aliases': []},
    # keikogi and dogi are the actual names for this, I believe. black_belt
    # is, I think, a more useful name here
'1f94b': {'canonical_name': 'black_belt', 'aliases': ['keikogi', 'dogi', 'martial_arts']},
'26f8': {'canonical_name': 'ice_skate', 'aliases': []},
'1f3bf': {'canonical_name': 'ski', 'aliases': []},
'26f7': {'canonical_name': 'skier', 'aliases': []},
'1f3c2': {'canonical_name': 'snowboarder', 'aliases': []},
# lift is both what lifters call it, and potentially can be used more
# generally than weight_lift. The others seemed like good additions.
'1f3cb': {'canonical_name': 'lift', 'aliases': ['work_out', 'weight_lift', 'gym']},
# The decisions on tenses here and in the rest of the sports section are
# mostly from gut feel. The unicode itself is all over the place.
'1f93a': {'canonical_name': 'fencing', 'aliases': []},
'1f93c': {'canonical_name': 'wrestling', 'aliases': []},
# seemed like reasonable additions
'1f938': {'canonical_name': 'cartwheel', 'aliases': ['acrobatics', 'gymnastics', 'tumbling']},
# seemed the best emoji for sports
'26f9': {'canonical_name': 'ball', 'aliases': ['sports']},
'1f93e': {'canonical_name': 'handball', 'aliases': []},
'1f3cc': {'canonical_name': 'golf', 'aliases': []},
'1f3c4': {'canonical_name': 'surf', 'aliases': []},
'1f3ca': {'canonical_name': 'swim', 'aliases': []},
'1f93d': {'canonical_name': 'water_polo', 'aliases': []},
# rest seem like reasonable additions
'1f6a3': {'canonical_name': 'rowboat', 'aliases': ['crew', 'sculling', 'rowing']},
# horse_riding seems like a reasonable addition
'1f3c7': {'canonical_name': 'horse_racing', 'aliases': ['horse_riding']},
# at least in the US: this = cyclist, Activity/53 = mountain biker, and
# motorcyclist = biker. Mainly from googling around and personal
# experience. E.g. http://grammarist.com/usage/cyclist-biker/ for cyclist
# and biker,
# https://www.theguardian.com/lifeandstyle/2010/oct/24/bike-snobs-guide-cycling-tribes
# for mountain biker (I've never heard the term "mountain cyclist", and
# they are the only group on that page that gets "biker" instead of
# "cyclist")
'1f6b4': {'canonical_name': 'cyclist', 'aliases': []},
# see Activity/51
'1f6b5': {'canonical_name': 'mountain_biker', 'aliases': []},
'1f3bd': {'canonical_name': 'running_shirt', 'aliases': []},
# I feel like people call sports medals "medals", and military medals
# "military medals". Also see Activity/56
'1f3c5': {'canonical_name': 'medal', 'aliases': []},
# See Activity/55. military_medal is the gemoji/unicode
'1f396': {'canonical_name': 'military_medal', 'aliases': []},
# gold and number_one seem like good additions
'1f947': {'canonical_name': 'first_place', 'aliases': ['gold', 'number_one']},
# to parallel Activity/57
'1f948': {'canonical_name': 'second_place', 'aliases': ['silver']},
# to parallel Activity/57
'1f949': {'canonical_name': 'third_place', 'aliases': ['bronze']},
# seemed the best emoji for winner
'1f3c6': {'canonical_name': 'trophy', 'aliases': ['winner']},
'1f3f5': {'canonical_name': 'rosette', 'aliases': []},
'1f397': {'canonical_name': 'reminder_ribbon', 'aliases': []},
# don't need ticket and admission_ticket (see Activity/64), so made one of
# them :pass:.
'1f3ab': {'canonical_name': 'pass', 'aliases': []},
# see Activity/63
'1f39f': {'canonical_name': 'ticket', 'aliases': []},
'1f3aa': {'canonical_name': 'circus', 'aliases': []},
'1f939': {'canonical_name': 'juggling', 'aliases': []},
# rest seem like good additions
'1f3ad': {'canonical_name': 'performing_arts', 'aliases': ['drama', 'theater']},
# rest seem like good additions
'1f3a8': {'canonical_name': 'art', 'aliases': ['palette', 'painting']},
# action seems more useful than clapper, and clapper doesn't seem like that
# common of a term
'1f3ac': {'canonical_name': 'action', 'aliases': []},
# seem like good additions
'1f3a4': {'canonical_name': 'microphone', 'aliases': ['mike', 'mic']},
'1f3a7': {'canonical_name': 'headphones', 'aliases': []},
'1f3bc': {'canonical_name': 'musical_score', 'aliases': []},
# piano seems more useful than musical_keyboard
'1f3b9': {'canonical_name': 'piano', 'aliases': ['musical_keyboard']},
'1f941': {'canonical_name': 'drum', 'aliases': []},
'1f3b7': {'canonical_name': 'saxophone', 'aliases': []},
'1f3ba': {'canonical_name': 'trumpet', 'aliases': []},
'1f3b8': {'canonical_name': 'guitar', 'aliases': []},
'1f3bb': {'canonical_name': 'violin', 'aliases': []},
# dice seems more useful
'1f3b2': {'canonical_name': 'dice', 'aliases': ['die']},
# direct_hit from gemoji/unicode, and seems more useful. bulls_eye seemed
# like a reasonable addition
'1f3af': {'canonical_name': 'direct_hit', 'aliases': ['darts', 'bulls_eye']},
# strike seemed more useful than bowling
'1f3b3': {'canonical_name': 'strike', 'aliases': ['bowling']},
'1f3ae': {'canonical_name': 'video_game', 'aliases': []},
# gambling seemed more useful than slot_machine
'1f3b0': {'canonical_name': 'slot_machine', 'aliases': []},
# the google emoji for this is not red
'1f697': {'canonical_name': 'car', 'aliases': []},
# rideshare seems like a reasonable addition
'1f695': {'canonical_name': 'taxi', 'aliases': ['rideshare']},
# the google emoji for this is not blue. recreational_vehicle is from
# gemoji/unicode, jeep seemed like a good addition
'1f699': {'canonical_name': 'recreational_vehicle', 'aliases': ['jeep']},
# school_bus seemed like a reasonable addition, even though the twitter
# glyph for this doesn't really look like a school bus
'1f68c': {'canonical_name': 'bus', 'aliases': ['school_bus']},
'1f68e': {'canonical_name': 'trolley', 'aliases': []},
'1f3ce': {'canonical_name': 'racecar', 'aliases': []},
'1f693': {'canonical_name': 'police_car', 'aliases': []},
'1f691': {'canonical_name': 'ambulance', 'aliases': []},
# https://trends.google.com/trends/explore?q=fire%20truck,fire%20engine
'1f692': {'canonical_name': 'fire_truck', 'aliases': ['fire_engine']},
'1f690': {'canonical_name': 'minibus', 'aliases': []},
# moving_truck and truck for Places/11 and Places/12 seem much better than
# the iamcal names
'1f69a': {'canonical_name': 'moving_truck', 'aliases': []},
# see Places/11 for truck. Rest seem reasonable additions.
'1f69b': {'canonical_name': 'truck', 'aliases': ['tractor-trailer', 'big_rig', 'semi_truck', 'transport_truck']}, # ignorelongline
'1f69c': {'canonical_name': 'tractor', 'aliases': []},
# kick_scooter and scooter seem better for Places/14 and Places /16 than
# scooter and motor_scooter.
'1f6f4': {'canonical_name': 'kick_scooter', 'aliases': []},
'1f6b2': {'canonical_name': 'bike', 'aliases': ['bicycle']},
# see Places/14. Called motor_bike (or bike) in India
'1f6f5': {'canonical_name': 'scooter', 'aliases': ['motor_bike']},
'1f3cd': {'canonical_name': 'motorcycle', 'aliases': []},
# siren seems more useful. alert seems like a reasonable addition
'1f6a8': {'canonical_name': 'siren', 'aliases': ['rotating_light', 'alert']},
'1f694': {'canonical_name': 'oncoming_police_car', 'aliases': []},
'1f68d': {'canonical_name': 'oncoming_bus', 'aliases': []},
# car to parallel e.g. Places/1
'1f698': {'canonical_name': 'oncoming_car', 'aliases': ['oncoming_automobile']},
'1f696': {'canonical_name': 'oncoming_taxi', 'aliases': []},
# ski_lift seems like a good addition
'1f6a1': {'canonical_name': 'aerial_tramway', 'aliases': ['ski_lift']},
# gondola seems more useful
'1f6a0': {'canonical_name': 'gondola', 'aliases': ['mountain_cableway']},
'1f69f': {'canonical_name': 'suspension_railway', 'aliases': []},
# train_car seems like a reasonable addition
'1f683': {'canonical_name': 'railway_car', 'aliases': ['train_car']},
# this does not seem like a good emoji for train, especially compared to
# Places/33. streetcar seems like a good addition.
'1f68b': {'canonical_name': 'tram', 'aliases': ['streetcar']},
'1f69e': {'canonical_name': 'mountain_railway', 'aliases': []},
# elevated_train seems like a reasonable addition
'1f69d': {'canonical_name': 'monorail', 'aliases': ['elevated_train']},
    # from gemoji/unicode. Also, we don't think we need two bullettrains
'1f684': {'canonical_name': 'high_speed_train', 'aliases': []},
# google, wikipedia, etc prefer bullet train to bullettrain
'1f685': {'canonical_name': 'bullet_train', 'aliases': []},
'1f688': {'canonical_name': 'light_rail', 'aliases': []},
'1f682': {'canonical_name': 'train', 'aliases': ['steam_locomotive']},
# oncoming_train seems better than train2
'1f686': {'canonical_name': 'oncoming_train', 'aliases': []},
# saving metro for Symbols/108. The tunnel makes subway more appropriate
# anyway.
'1f687': {'canonical_name': 'subway', 'aliases': []},
# all the glyphs of oncoming vehicles have names like oncoming_*. The
# alternate names are to parallel the alternates to Places/27.
'1f68a': {'canonical_name': 'oncoming_tram', 'aliases': ['oncoming_streetcar', 'oncoming_trolley']},
'1f689': {'canonical_name': 'station', 'aliases': []},
'1f681': {'canonical_name': 'helicopter', 'aliases': []},
'1f6e9': {'canonical_name': 'small_airplane', 'aliases': []},
'2708': {'canonical_name': 'airplane', 'aliases': []},
# take_off seems more useful than airplane_departure. departure also seems
# more useful than airplane_departure. Arguably departure should be the
# primary, since arrival is probably more useful than landing in Places/42,
# but going with this for now.
'1f6eb': {'canonical_name': 'take_off', 'aliases': ['departure', 'airplane_departure']},
# parallel to Places/41
'1f6ec': {'canonical_name': 'landing', 'aliases': ['arrival', 'airplane_arrival']},
'1f680': {'canonical_name': 'rocket', 'aliases': []},
'1f6f0': {'canonical_name': 'satellite', 'aliases': []},
'1f4ba': {'canonical_name': 'seat', 'aliases': []},
'1f6f6': {'canonical_name': 'canoe', 'aliases': []},
'26f5': {'canonical_name': 'boat', 'aliases': ['sailboat']},
'1f6e5': {'canonical_name': 'motor_boat', 'aliases': []},
'1f6a4': {'canonical_name': 'speedboat', 'aliases': []},
    # yacht and cruise seem like reasonable additions
'1f6f3': {'canonical_name': 'passenger_ship', 'aliases': ['yacht', 'cruise']},
'26f4': {'canonical_name': 'ferry', 'aliases': []},
'1f6a2': {'canonical_name': 'ship', 'aliases': []},
'2693': {'canonical_name': 'anchor', 'aliases': []},
# there already is a construction in Places/82, and work_in_progress seems
# like a useful thing to have. Construction_zone seems better than the
# unicode construction_sign, and is there partly so this autocompletes for
# construction.
'1f6a7': {'canonical_name': 'work_in_progress', 'aliases': ['construction_zone']},
# alternates from https://emojipedia.org/fuel-pump/. unicode is fuel_pump,
# not fuelpump
'26fd': {'canonical_name': 'fuel_pump', 'aliases': ['gas_pump', 'petrol_pump']},
# not sure why iamcal removed the space
'1f68f': {'canonical_name': 'bus_stop', 'aliases': []},
# https://emojipedia.org/vertical-traffic-light/ thinks this is the more
# common of the two traffic lights, so putting traffic_light on this one
'1f6a6': {'canonical_name': 'traffic_light', 'aliases': ['vertical_traffic_light']},
# see Places/57
'1f6a5': {'canonical_name': 'horizontal_traffic_light', 'aliases': []},
# road_trip from http://mashable.com/2015/10/23/ios-9-1-emoji-guide
'1f5fa': {'canonical_name': 'map', 'aliases': ['world_map', 'road_trip']},
# rock_carving, statue, and tower seem more general and less culturally
# specific, for Places/60, 61, and 63.
'1f5ff': {'canonical_name': 'rock_carving', 'aliases': ['moyai']},
# new_york from https://emojipedia.org/statue-of-liberty/. see Places/60
# for statue
'1f5fd': {'canonical_name': 'statue', 'aliases': ['new_york', 'statue_of_liberty']},
'26f2': {'canonical_name': 'fountain', 'aliases': []},
# see Places/60
'1f5fc': {'canonical_name': 'tower', 'aliases': ['tokyo_tower']},
# choosing this as the castle since castles are a way bigger thing in
# Europe than Japan, and shiro is a pretty reasonable name for Places/65
'1f3f0': {'canonical_name': 'castle', 'aliases': []},
# see Places/64
'1f3ef': {'canonical_name': 'shiro', 'aliases': []},
'1f3df': {'canonical_name': 'stadium', 'aliases': []},
'1f3a1': {'canonical_name': 'ferris_wheel', 'aliases': []},
'1f3a2': {'canonical_name': 'roller_coaster', 'aliases': []},
# merry_go_round seems like a good addition
'1f3a0': {'canonical_name': 'carousel', 'aliases': ['merry_go_round']},
# beach_umbrella seems more useful
'26f1': {'canonical_name': 'beach_umbrella', 'aliases': []},
'1f3d6': {'canonical_name': 'beach', 'aliases': []},
'1f3dd': {'canonical_name': 'island', 'aliases': []},
'26f0': {'canonical_name': 'mountain', 'aliases': []},
'1f3d4': {'canonical_name': 'snowy_mountain', 'aliases': []},
# already lots of other mountains, otherwise would rename this like
# Places/60
'1f5fb': {'canonical_name': 'mount_fuji', 'aliases': []},
'1f30b': {'canonical_name': 'volcano', 'aliases': []},
'1f3dc': {'canonical_name': 'desert', 'aliases': []},
# campsite from https://emojipedia.org/camping/, I think Places/79 is a
# better camping
'1f3d5': {'canonical_name': 'campsite', 'aliases': []},
'26fa': {'canonical_name': 'tent', 'aliases': ['camping']},
'1f6e4': {'canonical_name': 'railway_track', 'aliases': ['train_tracks']},
# road is used much more frequently at
# https://trends.google.com/trends/explore?q=road,motorway
'1f6e3': {'canonical_name': 'road', 'aliases': ['motorway']},
'1f3d7': {'canonical_name': 'construction', 'aliases': []},
'1f3ed': {'canonical_name': 'factory', 'aliases': []},
'1f3e0': {'canonical_name': 'house', 'aliases': []},
# suburb seems more useful
'1f3e1': {'canonical_name': 'suburb', 'aliases': []},
'1f3d8': {'canonical_name': 'houses', 'aliases': []},
# condemned seemed like a good addition
'1f3da': {'canonical_name': 'derelict_house', 'aliases': ['condemned']},
'1f3e2': {'canonical_name': 'office', 'aliases': []},
'1f3ec': {'canonical_name': 'department_store', 'aliases': []},
'1f3e3': {'canonical_name': 'japan_post', 'aliases': []},
'1f3e4': {'canonical_name': 'post_office', 'aliases': []},
'1f3e5': {'canonical_name': 'hospital', 'aliases': []},
'1f3e6': {'canonical_name': 'bank', 'aliases': []},
'1f3e8': {'canonical_name': 'hotel', 'aliases': []},
'1f3ea': {'canonical_name': 'convenience_store', 'aliases': []},
'1f3eb': {'canonical_name': 'school', 'aliases': []},
'1f3e9': {'canonical_name': 'love_hotel', 'aliases': []},
'1f492': {'canonical_name': 'wedding', 'aliases': []},
'1f3db': {'canonical_name': 'classical_building', 'aliases': []},
'26ea': {'canonical_name': 'church', 'aliases': []},
'1f54c': {'canonical_name': 'mosque', 'aliases': []},
'1f54d': {'canonical_name': 'synagogue', 'aliases': []},
'1f54b': {'canonical_name': 'kaaba', 'aliases': []},
'26e9': {'canonical_name': 'shinto_shrine', 'aliases': []},
'1f5fe': {'canonical_name': 'japan', 'aliases': []},
# rice_scene seems like a strange name to have. gemoji alternate is
# moon_ceremony
'1f391': {'canonical_name': 'moon_ceremony', 'aliases': []},
'1f3de': {'canonical_name': 'national_park', 'aliases': []},
# ocean_sunrise to parallel Places/109
'1f305': {'canonical_name': 'sunrise', 'aliases': ['ocean_sunrise']},
'1f304': {'canonical_name': 'mountain_sunrise', 'aliases': []},
# shooting_star and wish seem like way better descriptions. gemoji/unicode
# is shooting_star
'1f320': {'canonical_name': 'shooting_star', 'aliases': ['wish']},
'1f387': {'canonical_name': 'sparkler', 'aliases': []},
'1f386': {'canonical_name': 'fireworks', 'aliases': []},
'1f307': {'canonical_name': 'city_sunrise', 'aliases': []},
'1f306': {'canonical_name': 'sunset', 'aliases': []},
# city and skyline seem more useful than cityscape
'1f3d9': {'canonical_name': 'city', 'aliases': ['skyline']},
'1f303': {'canonical_name': 'night', 'aliases': []},
# night_sky seems like a good addition
'1f30c': {'canonical_name': 'milky_way', 'aliases': ['night_sky']},
'1f309': {'canonical_name': 'bridge', 'aliases': []},
'1f301': {'canonical_name': 'foggy', 'aliases': []},
'231a': {'canonical_name': 'watch', 'aliases': []},
# unicode/gemoji is mobile_phone. The rest seem like good additions
'1f4f1': {'canonical_name': 'mobile_phone', 'aliases': ['smartphone', 'iphone', 'android']},
'1f4f2': {'canonical_name': 'calling', 'aliases': []},
# gemoji has laptop, even though the google emoji for this does not look
# like a laptop
'1f4bb': {'canonical_name': 'computer', 'aliases': ['laptop']},
'2328': {'canonical_name': 'keyboard', 'aliases': []},
'1f5a5': {'canonical_name': 'desktop_computer', 'aliases': []},
'1f5a8': {'canonical_name': 'printer', 'aliases': []},
# gemoji/unicode is computer_mouse
'1f5b1': {'canonical_name': 'computer_mouse', 'aliases': []},
'1f5b2': {'canonical_name': 'trackball', 'aliases': []},
# arcade seems like a reasonable addition
'1f579': {'canonical_name': 'joystick', 'aliases': ['arcade']},
# vise seems like a reasonable addition
'1f5dc': {'canonical_name': 'compression', 'aliases': ['vise']},
# gold record seems more useful, idea came from
# http://www.11points.com/Web-Tech/11_Emoji_With_Different_Meanings_Than_You_Think
'1f4bd': {'canonical_name': 'gold_record', 'aliases': ['minidisc']},
'1f4be': {'canonical_name': 'floppy_disk', 'aliases': []},
'1f4bf': {'canonical_name': 'cd', 'aliases': []},
'1f4c0': {'canonical_name': 'dvd', 'aliases': []},
# videocassette from gemoji/unicode
'1f4fc': {'canonical_name': 'vhs', 'aliases': ['videocassette']},
'1f4f7': {'canonical_name': 'camera', 'aliases': []},
# both of these seem more useful than camera_with_flash
'1f4f8': {'canonical_name': 'taking_a_picture', 'aliases': ['say_cheese']},
# video_recorder seems like a reasonable addition
'1f4f9': {'canonical_name': 'video_camera', 'aliases': ['video_recorder']},
'1f3a5': {'canonical_name': 'movie_camera', 'aliases': []},
# seems like the best emoji for movie
'1f4fd': {'canonical_name': 'projector', 'aliases': ['movie']},
'1f39e': {'canonical_name': 'film', 'aliases': []},
# both of these seem more useful than telephone_receiver
'1f4de': {'canonical_name': 'landline', 'aliases': ['home_phone']},
'260e': {'canonical_name': 'phone', 'aliases': ['telephone']},
'1f4df': {'canonical_name': 'pager', 'aliases': []},
'1f4e0': {'canonical_name': 'fax', 'aliases': []},
'1f4fa': {'canonical_name': 'tv', 'aliases': ['television']},
'1f4fb': {'canonical_name': 'radio', 'aliases': []},
'1f399': {'canonical_name': 'studio_microphone', 'aliases': []},
# volume seems more useful
'1f39a': {'canonical_name': 'volume', 'aliases': ['level_slider']},
'1f39b': {'canonical_name': 'control_knobs', 'aliases': []},
'23f1': {'canonical_name': 'stopwatch', 'aliases': []},
'23f2': {'canonical_name': 'timer', 'aliases': []},
'23f0': {'canonical_name': 'alarm_clock', 'aliases': []},
'1f570': {'canonical_name': 'mantelpiece_clock', 'aliases': []},
# times_up and time_ticking seem more useful than the hourglass names
'231b': {'canonical_name': 'times_up', 'aliases': ['hourglass_done']},
# seems like the better hourglass. Also see Objects/36
'23f3': {'canonical_name': 'time_ticking', 'aliases': ['hourglass']},
'1f4e1': {'canonical_name': 'satellite_antenna', 'aliases': []},
# seems like a reasonable addition
'1f50b': {'canonical_name': 'battery', 'aliases': ['full_battery']},
'1f50c': {'canonical_name': 'electric_plug', 'aliases': []},
# light_bulb seems better, and is from unicode/gemoji. idea seems like a good
# addition
'1f4a1': {'canonical_name': 'light_bulb', 'aliases': ['bulb', 'idea']},
'1f526': {'canonical_name': 'flashlight', 'aliases': []},
'1f56f': {'canonical_name': 'candle', 'aliases': []},
# seems like a reasonable addition
'1f5d1': {'canonical_name': 'wastebasket', 'aliases': ['trash_can']},
# http://www.iemoji.com/view/emoji/1173/objects/oil-drum
'1f6e2': {'canonical_name': 'oil_drum', 'aliases': ['commodities']},
# losing money from https://emojipedia.org/money-with-wings/,
# easy_come_easy_go seems like a reasonable addition
'1f4b8': {'canonical_name': 'losing_money', 'aliases': ['easy_come_easy_go', 'money_with_wings']},
# I think the _bills, _banknotes etc versions of these are arguably more
# fun to use in chat, and certainly match the glyphs better
'1f4b5': {'canonical_name': 'dollar_bills', 'aliases': []},
'1f4b4': {'canonical_name': 'yen_banknotes', 'aliases': []},
'1f4b6': {'canonical_name': 'euro_banknotes', 'aliases': []},
'1f4b7': {'canonical_name': 'pound_notes', 'aliases': []},
'1f4b0': {'canonical_name': 'money', 'aliases': []},
'1f4b3': {'canonical_name': 'credit_card', 'aliases': ['debit_card']},
'1f48e': {'canonical_name': 'gem', 'aliases': ['crystal']},
# justice seems more useful
'2696': {'canonical_name': 'justice', 'aliases': ['scales', 'balance']},
# fixing, at_work, and working_on_it seem like useful concepts for
# workplace chat
'1f527': {'canonical_name': 'fixing', 'aliases': ['wrench']},
'1f528': {'canonical_name': 'hammer', 'aliases': ['maintenance', 'handyman', 'handywoman']},
'2692': {'canonical_name': 'at_work', 'aliases': ['hammer_and_pick']},
# something that might even be useful for chat.zulip.org
'1f6e0': {'canonical_name': 'working_on_it', 'aliases': ['hammer_and_wrench', 'tools']},
'26cf': {'canonical_name': 'mine', 'aliases': ['pick']},
# screw is somewhat inappropriate, but not openly so, so leaving it in
'1f529': {'canonical_name': 'nut_and_bolt', 'aliases': ['screw']},
'2699': {'canonical_name': 'gear', 'aliases': ['settings', 'mechanical', 'engineer']},
'26d3': {'canonical_name': 'chains', 'aliases': []},
'1f52b': {'canonical_name': 'gun', 'aliases': []},
'1f4a3': {'canonical_name': 'bomb', 'aliases': []},
# betrayed from http://www.iemoji.com/view/emoji/786/objects/kitchen-knife
'1f52a': {'canonical_name': 'knife', 'aliases': ['hocho', 'betrayed']},
# rated_for_violence from
# http://www.iemoji.com/view/emoji/1085/objects/dagger. hate (also
# suggested there) seems too strong, as does just "violence".
'1f5e1': {'canonical_name': 'dagger', 'aliases': ['rated_for_violence']},
'2694': {'canonical_name': 'duel', 'aliases': ['swords']},
'1f6e1': {'canonical_name': 'shield', 'aliases': []},
'1f6ac': {'canonical_name': 'smoking', 'aliases': []},
'26b0': {'canonical_name': 'coffin', 'aliases': ['burial', 'grave']},
'26b1': {'canonical_name': 'funeral_urn', 'aliases': ['cremation']},
# amphora is too obscure, I think
'1f3fa': {'canonical_name': 'vase', 'aliases': ['amphora']},
'1f52e': {'canonical_name': 'crystal_ball', 'aliases': ['oracle', 'future', 'fortune_telling']},
'1f4ff': {'canonical_name': 'prayer_beads', 'aliases': []},
'1f488': {'canonical_name': 'barber', 'aliases': ['striped_pole']},
# alchemy seems more useful and less obscure
'2697': {'canonical_name': 'alchemy', 'aliases': ['alembic']},
'1f52d': {'canonical_name': 'telescope', 'aliases': []},
# science seems useful to have. scientist inspired by
# http://www.iemoji.com/view/emoji/787/objects/microscope
'1f52c': {'canonical_name': 'science', 'aliases': ['microscope', 'scientist']},
'1f573': {'canonical_name': 'hole', 'aliases': []},
'1f48a': {'canonical_name': 'medicine', 'aliases': ['pill']},
'1f489': {'canonical_name': 'injection', 'aliases': ['syringe']},
'1f321': {'canonical_name': 'temperature', 'aliases': ['thermometer', 'warm']},
'1f6bd': {'canonical_name': 'toilet', 'aliases': []},
'1f6b0': {'canonical_name': 'potable_water', 'aliases': ['tap_water', 'drinking_water']},
'1f6bf': {'canonical_name': 'shower', 'aliases': []},
'1f6c1': {'canonical_name': 'bathtub', 'aliases': []},
'1f6c0': {'canonical_name': 'bath', 'aliases': []},
# reception and services from
# http://www.iemoji.com/view/emoji/1169/objects/bellhop-bell
'1f6ce': {'canonical_name': 'bellhop_bell', 'aliases': ['reception', 'services', 'ding']},
'1f511': {'canonical_name': 'key', 'aliases': []},
# encrypted from http://www.iemoji.com/view/emoji/1081/objects/old-key,
# secret from http://mashable.com/2015/10/23/ios-9-1-emoji-guide
'1f5dd': {'canonical_name': 'secret', 'aliases': ['dungeon', 'old_key', 'encrypted', 'clue', 'hint']},
'1f6aa': {'canonical_name': 'door', 'aliases': []},
'1f6cb': {'canonical_name': 'living_room', 'aliases': ['furniture', 'couch_and_lamp', 'lifestyles']},
'1f6cf': {'canonical_name': 'bed', 'aliases': ['bedroom']},
# guestrooms from iemoji, would add hotel but taken by Places/94
'1f6cc': {'canonical_name': 'in_bed', 'aliases': ['accommodations', 'guestrooms']},
'1f5bc': {'canonical_name': 'picture', 'aliases': ['framed_picture']},
'1f6cd': {'canonical_name': 'shopping_bags', 'aliases': []},
# https://trends.google.com/trends/explore?q=shopping%20cart,shopping%20trolley
'1f6d2': {'canonical_name': 'shopping_cart', 'aliases': ['shopping_trolley']},
'1f381': {'canonical_name': 'gift', 'aliases': ['present']},
# seemed like the best celebration
'1f388': {'canonical_name': 'balloon', 'aliases': ['celebration']},
# from gemoji/unicode
'1f38f': {'canonical_name': 'carp_streamer', 'aliases': ['flags']},
'1f380': {'canonical_name': 'ribbon', 'aliases': ['decoration']},
'1f38a': {'canonical_name': 'confetti', 'aliases': ['party_ball']},
# seemed like the best congratulations
'1f389': {'canonical_name': 'tada', 'aliases': ['congratulations']},
'1f38e': {'canonical_name': 'dolls', 'aliases': []},
'1f3ee': {'canonical_name': 'lantern', 'aliases': ['izakaya_lantern']},
'1f390': {'canonical_name': 'wind_chime', 'aliases': []},
'2709': {'canonical_name': 'email', 'aliases': ['envelope', 'mail']},
# seems useful for chat?
'1f4e9': {'canonical_name': 'mail_sent', 'aliases': ['sealed']},
'1f4e8': {'canonical_name': 'mail_received', 'aliases': []},
'1f4e7': {'canonical_name': 'e-mail', 'aliases': []},
'1f48c': {'canonical_name': 'love_letter', 'aliases': []},
'1f4e5': {'canonical_name': 'inbox', 'aliases': []},
'1f4e4': {'canonical_name': 'outbox', 'aliases': []},
'1f4e6': {'canonical_name': 'package', 'aliases': []},
# price_tag from iemoji
'1f3f7': {'canonical_name': 'label', 'aliases': ['tag', 'price_tag']},
'1f4ea': {'canonical_name': 'closed_mailbox', 'aliases': []},
'1f4eb': {'canonical_name': 'mailbox', 'aliases': []},
'1f4ec': {'canonical_name': 'unread_mail', 'aliases': []},
'1f4ed': {'canonical_name': 'inbox_zero', 'aliases': ['empty_mailbox', 'no_mail']},
'1f4ee': {'canonical_name': 'mail_dropoff', 'aliases': []},
'1f4ef': {'canonical_name': 'horn', 'aliases': []},
'1f4dc': {'canonical_name': 'scroll', 'aliases': []},
# receipt seems more useful?
'1f4c3': {'canonical_name': 'receipt', 'aliases': []},
'1f4c4': {'canonical_name': 'document', 'aliases': ['paper', 'file', 'page']},
'1f4d1': {'canonical_name': 'place_holder', 'aliases': []},
'1f4ca': {'canonical_name': 'bar_chart', 'aliases': []},
# seems like the best chart
'1f4c8': {'canonical_name': 'chart', 'aliases': ['upwards_trend', 'growing', 'increasing']},
'1f4c9': {'canonical_name': 'downwards_trend', 'aliases': ['shrinking', 'decreasing']},
'1f5d2': {'canonical_name': 'spiral_notepad', 'aliases': []},
# '1f5d3': {'canonical_name': 'X', 'aliases': ['spiral_calendar_pad']},
# swapped the following two largely due to the emojione glyphs
'1f4c6': {'canonical_name': 'date', 'aliases': []},
'1f4c5': {'canonical_name': 'calendar', 'aliases': []},
'1f4c7': {'canonical_name': 'rolodex', 'aliases': ['card_index']},
'1f5c3': {'canonical_name': 'archive', 'aliases': []},
'1f5f3': {'canonical_name': 'ballot_box', 'aliases': []},
'1f5c4': {'canonical_name': 'file_cabinet', 'aliases': []},
'1f4cb': {'canonical_name': 'clipboard', 'aliases': []},
# don't need two file_folders, so made this organize
'1f4c1': {'canonical_name': 'organize', 'aliases': ['file_folder']},
'1f4c2': {'canonical_name': 'folder', 'aliases': []},
'1f5c2': {'canonical_name': 'sort', 'aliases': []},
'1f5de': {'canonical_name': 'newspaper', 'aliases': ['swat']},
'1f4f0': {'canonical_name': 'headlines', 'aliases': []},
'1f4d3': {'canonical_name': 'notebook', 'aliases': ['composition_book']},
'1f4d4': {'canonical_name': 'decorative_notebook', 'aliases': []},
'1f4d2': {'canonical_name': 'ledger', 'aliases': ['spiral_notebook']},
# the glyphs here are the same as Objects/147-149 (with a different color),
# for all but google
'1f4d5': {'canonical_name': 'red_book', 'aliases': ['closed_book']},
'1f4d7': {'canonical_name': 'green_book', 'aliases': []},
'1f4d8': {'canonical_name': 'blue_book', 'aliases': []},
'1f4d9': {'canonical_name': 'orange_book', 'aliases': []},
'1f4da': {'canonical_name': 'books', 'aliases': []},
'1f4d6': {'canonical_name': 'book', 'aliases': ['open_book']},
'1f516': {'canonical_name': 'bookmark', 'aliases': []},
'1f517': {'canonical_name': 'link', 'aliases': []},
'1f4ce': {'canonical_name': 'paperclip', 'aliases': ['attachment']},
# office_supplies from http://mashable.com/2015/10/23/ios-9-1-emoji-guide
'1f587': {'canonical_name': 'office_supplies', 'aliases': ['paperclip_chain', 'linked']},
'1f4d0': {'canonical_name': 'carpenter_square', 'aliases': ['triangular_ruler']},
'1f4cf': {'canonical_name': 'ruler', 'aliases': ['straightedge']},
'1f4cc': {'canonical_name': 'push_pin', 'aliases': ['thumb_tack']},
'1f4cd': {'canonical_name': 'pin', 'aliases': ['sewing_pin']},
'2702': {'canonical_name': 'scissors', 'aliases': []},
'1f58a': {'canonical_name': 'pen', 'aliases': ['ballpoint_pen']},
'1f58b': {'canonical_name': 'fountain_pen', 'aliases': []},
# three of the four emoji sets just have a rightwards-facing Objects/162
# '2712': {'canonical_name': 'X', 'aliases': ['black_nib']},
'1f58c': {'canonical_name': 'paintbrush', 'aliases': []},
'1f58d': {'canonical_name': 'crayon', 'aliases': []},
'1f4dd': {'canonical_name': 'memo', 'aliases': ['note']},
'270f': {'canonical_name': 'pencil', 'aliases': []},
'1f50d': {'canonical_name': 'search', 'aliases': ['find', 'magnifying_glass']},
# '1f50e': {'canonical_name': 'X', 'aliases': ['mag_right']},
# https://emojipedia.org/lock-with-ink-pen/
'1f50f': {'canonical_name': 'privacy', 'aliases': ['key_signing', 'digital_security', 'protected']},
'1f510': {'canonical_name': 'secure', 'aliases': ['lock_with_key', 'safe', 'commitment', 'loyalty']},
'1f512': {'canonical_name': 'locked', 'aliases': []},
'1f513': {'canonical_name': 'unlocked', 'aliases': []},
# seems the best glyph for love and love_you
'2764': {'canonical_name': 'heart', 'aliases': ['love', 'love_you']},
'1f49b': {'canonical_name': 'yellow_heart', 'aliases': ['heart_of_gold']},
'1f49a': {'canonical_name': 'green_heart', 'aliases': ['envy']},
'1f499': {'canonical_name': 'blue_heart', 'aliases': []},
'1f49c': {'canonical_name': 'purple_heart', 'aliases': ['bravery']},
'1f5a4': {'canonical_name': 'black_heart', 'aliases': []},
'1f494': {'canonical_name': 'broken_heart', 'aliases': ['heartache']},
'2763': {'canonical_name': 'heart_exclamation', 'aliases': []},
'1f495': {'canonical_name': 'two_hearts', 'aliases': []},
'1f49e': {'canonical_name': 'revolving_hearts', 'aliases': []},
'1f493': {'canonical_name': 'heartbeat', 'aliases': []},
'1f497': {'canonical_name': 'heart_pulse', 'aliases': ['growing_heart']},
'1f496': {'canonical_name': 'sparkling_heart', 'aliases': []},
'1f498': {'canonical_name': 'cupid', 'aliases': ['smitten', 'heart_arrow']},
'1f49d': {'canonical_name': 'gift_heart', 'aliases': []},
'1f49f': {'canonical_name': 'heart_box', 'aliases': []},
'262e': {'canonical_name': 'peace', 'aliases': []},
'271d': {'canonical_name': 'cross', 'aliases': ['christianity']},
'262a': {'canonical_name': 'star_and_crescent', 'aliases': ['islam']},
'1f549': {'canonical_name': 'om', 'aliases': ['hinduism']},
'2638': {'canonical_name': 'wheel_of_dharma', 'aliases': ['buddhism']},
'2721': {'canonical_name': 'star_of_david', 'aliases': ['judaism']},
# can't find any explanation of this at all. Is an alternate star of david?
# '1f52f': {'canonical_name': 'X', 'aliases': ['six_pointed_star']},
'1f54e': {'canonical_name': 'menorah', 'aliases': []},
'262f': {'canonical_name': 'yin_yang', 'aliases': []},
'2626': {'canonical_name': 'orthodox_cross', 'aliases': []},
'1f6d0': {'canonical_name': 'place_of_worship', 'aliases': []},
'26ce': {'canonical_name': 'ophiuchus', 'aliases': []},
'2648': {'canonical_name': 'aries', 'aliases': []},
'2649': {'canonical_name': 'taurus', 'aliases': []},
'264a': {'canonical_name': 'gemini', 'aliases': []},
'264b': {'canonical_name': 'cancer', 'aliases': []},
'264c': {'canonical_name': 'leo', 'aliases': []},
'264d': {'canonical_name': 'virgo', 'aliases': []},
'264e': {'canonical_name': 'libra', 'aliases': []},
'264f': {'canonical_name': 'scorpius', 'aliases': []},
'2650': {'canonical_name': 'sagittarius', 'aliases': []},
'2651': {'canonical_name': 'capricorn', 'aliases': []},
'2652': {'canonical_name': 'aquarius', 'aliases': []},
'2653': {'canonical_name': 'pisces', 'aliases': []},
'1f194': {'canonical_name': 'id', 'aliases': []},
'269b': {'canonical_name': 'atom', 'aliases': ['physics']},
# Japanese symbol
# '1f251': {'canonical_name': 'X', 'aliases': ['accept']},
'2622': {'canonical_name': 'radioactive', 'aliases': ['nuclear']},
'2623': {'canonical_name': 'biohazard', 'aliases': []},
'1f4f4': {'canonical_name': 'phone_off', 'aliases': []},
'1f4f3': {'canonical_name': 'vibration_mode', 'aliases': []},
# '1f236': {'canonical_name': 'X', 'aliases': ['u6709']},
# '1f21a': {'canonical_name': 'X', 'aliases': ['u7121']},
# '1f238': {'canonical_name': 'X', 'aliases': ['u7533']},
# '1f23a': {'canonical_name': 'X', 'aliases': ['u55b6']},
# '1f237': {'canonical_name': 'X', 'aliases': ['u6708']},
'2734': {'canonical_name': 'eight_pointed_star', 'aliases': []},
'1f19a': {'canonical_name': 'vs', 'aliases': []},
'1f4ae': {'canonical_name': 'white_flower', 'aliases': []},
# '1f250': {'canonical_name': 'X', 'aliases': ['ideograph_advantage']},
# Japanese character
# '3299': {'canonical_name': 'X', 'aliases': ['secret']},
# '3297': {'canonical_name': 'X', 'aliases': ['congratulations']},
# '1f234': {'canonical_name': 'X', 'aliases': ['u5408']},
# '1f235': {'canonical_name': 'X', 'aliases': ['u6e80']},
# '1f239': {'canonical_name': 'X', 'aliases': ['u5272']},
# '1f232': {'canonical_name': 'X', 'aliases': ['u7981']},
'1f170': {'canonical_name': 'a', 'aliases': []},
'1f171': {'canonical_name': 'b', 'aliases': []},
'1f18e': {'canonical_name': 'ab', 'aliases': []},
'1f191': {'canonical_name': 'cl', 'aliases': []},
'1f17e': {'canonical_name': 'o', 'aliases': []},
'1f198': {'canonical_name': 'sos', 'aliases': []},
# Symbols/105 seems like a better x, and looks more like the other letters
'274c': {'canonical_name': 'cross_mark', 'aliases': ['incorrect', 'wrong']},
'2b55': {'canonical_name': 'circle', 'aliases': []},
'1f6d1': {'canonical_name': 'stop_sign', 'aliases': ['octagonal_sign']},
'26d4': {'canonical_name': 'no_entry', 'aliases': ['wrong_way']},
'1f4db': {'canonical_name': 'name_badge', 'aliases': []},
'1f6ab': {'canonical_name': 'prohibited', 'aliases': ['not_allowed']},
'1f4af': {'canonical_name': '100', 'aliases': ['hundred']},
'1f4a2': {'canonical_name': 'anger', 'aliases': ['bam', 'pow']},
'2668': {'canonical_name': 'hot_springs', 'aliases': []},
'1f6b7': {'canonical_name': 'no_pedestrians', 'aliases': []},
'1f6af': {'canonical_name': 'do_not_litter', 'aliases': []},
'1f6b3': {'canonical_name': 'no_bicycles', 'aliases': []},
'1f6b1': {'canonical_name': 'non-potable_water', 'aliases': []},
'1f51e': {'canonical_name': 'underage', 'aliases': ['nc17']},
'1f4f5': {'canonical_name': 'no_phones', 'aliases': []},
'1f6ad': {'canonical_name': 'no_smoking', 'aliases': []},
'2757': {'canonical_name': 'exclamation', 'aliases': []},
'2755': {'canonical_name': 'grey_exclamation', 'aliases': []},
'2753': {'canonical_name': 'question', 'aliases': []},
'2754': {'canonical_name': 'grey_question', 'aliases': []},
'203c': {'canonical_name': 'bangbang', 'aliases': ['double_exclamation']},
'2049': {'canonical_name': 'interrobang', 'aliases': []},
'1f505': {'canonical_name': 'low_brightness', 'aliases': ['dim']},
'1f506': {'canonical_name': 'brightness', 'aliases': ['high_brightness']},
'303d': {'canonical_name': 'part_alternation', 'aliases': []},
'26a0': {'canonical_name': 'warning', 'aliases': ['caution', 'danger']},
'1f6b8': {'canonical_name': 'children_crossing', 'aliases': ['school_crossing', 'drive_with_care']},
'1f531': {'canonical_name': 'trident', 'aliases': []},
'269c': {'canonical_name': 'fleur_de_lis', 'aliases': []},
'1f530': {'canonical_name': 'beginner', 'aliases': []},
'267b': {'canonical_name': 'recycle', 'aliases': []},
# seems like the best check
'2705': {'canonical_name': 'check', 'aliases': ['all_good', 'approved']},
# '1f22f': {'canonical_name': 'X', 'aliases': ['u6307']},
# stock_market seemed more useful
'1f4b9': {'canonical_name': 'stock_market', 'aliases': []},
'2747': {'canonical_name': 'sparkle', 'aliases': []},
'2733': {'canonical_name': 'eight_spoked_asterisk', 'aliases': []},
'274e': {'canonical_name': 'x', 'aliases': []},
'1f310': {'canonical_name': 'www', 'aliases': ['globe']},
'1f4a0': {'canonical_name': 'cute', 'aliases': ['kawaii', 'diamond_with_a_dot']},
'24c2': {'canonical_name': 'metro', 'aliases': ['m']},
'1f300': {'canonical_name': 'cyclone', 'aliases': ['hurricane', 'typhoon']},
'1f4a4': {'canonical_name': 'zzz', 'aliases': []},
'1f3e7': {'canonical_name': 'atm', 'aliases': []},
'1f6be': {'canonical_name': 'wc', 'aliases': ['water_closet']},
'267f': {'canonical_name': 'accessible', 'aliases': ['wheelchair', 'disabled']},
'1f17f': {'canonical_name': 'parking', 'aliases': ['p']},
# '1f233': {'canonical_name': 'X', 'aliases': ['u7a7a']},
# '1f202': {'canonical_name': 'X', 'aliases': ['sa']},
'1f6c2': {'canonical_name': 'passport_control', 'aliases': ['immigration']},
'1f6c3': {'canonical_name': 'customs', 'aliases': []},
'1f6c4': {'canonical_name': 'baggage_claim', 'aliases': []},
'1f6c5': {'canonical_name': 'locker', 'aliases': ['locked_bag']},
'1f6b9': {'canonical_name': 'mens', 'aliases': []},
'1f6ba': {'canonical_name': 'womens', 'aliases': []},
# seems more in line with the surrounding bathroom symbols
'1f6bc': {'canonical_name': 'baby_change_station', 'aliases': ['nursery']},
'1f6bb': {'canonical_name': 'restroom', 'aliases': []},
'1f6ae': {'canonical_name': 'put_litter_in_its_place', 'aliases': []},
'1f3a6': {'canonical_name': 'cinema', 'aliases': ['movie_theater']},
'1f4f6': {'canonical_name': 'cell_reception', 'aliases': ['signal_strength', 'signal_bars']},
# '1f201': {'canonical_name': 'X', 'aliases': ['koko']},
'1f523': {'canonical_name': 'symbols', 'aliases': []},
'2139': {'canonical_name': 'info', 'aliases': []},
'1f524': {'canonical_name': 'abc', 'aliases': []},
'1f521': {'canonical_name': 'abcd', 'aliases': ['alphabet']},
'1f520': {'canonical_name': 'capital_abcd', 'aliases': ['capital_letters']},
'1f196': {'canonical_name': 'ng', 'aliases': []},
# from unicode/gemoji. Saving ok for People/111
'1f197': {'canonical_name': 'squared_ok', 'aliases': []},
# from unicode, and to parallel Symbols/135. Saving up for Symbols/171
'1f199': {'canonical_name': 'squared_up', 'aliases': []},
'1f192': {'canonical_name': 'cool', 'aliases': []},
'1f195': {'canonical_name': 'new', 'aliases': []},
'1f193': {'canonical_name': 'free', 'aliases': []},
'0030-20e3': {'canonical_name': 'zero', 'aliases': []},
'0031-20e3': {'canonical_name': 'one', 'aliases': []},
'0032-20e3': {'canonical_name': 'two', 'aliases': []},
'0033-20e3': {'canonical_name': 'three', 'aliases': []},
'0034-20e3': {'canonical_name': 'four', 'aliases': []},
'0035-20e3': {'canonical_name': 'five', 'aliases': []},
'0036-20e3': {'canonical_name': 'six', 'aliases': []},
'0037-20e3': {'canonical_name': 'seven', 'aliases': []},
'0038-20e3': {'canonical_name': 'eight', 'aliases': []},
'0039-20e3': {'canonical_name': 'nine', 'aliases': []},
'1f51f': {'canonical_name': 'ten', 'aliases': []},
'1f522': {'canonical_name': '1234', 'aliases': ['numbers']},
'0023-20e3': {'canonical_name': 'hash', 'aliases': []},
'002a-20e3': {'canonical_name': 'asterisk', 'aliases': []},
'25b6': {'canonical_name': 'play', 'aliases': []},
'23f8': {'canonical_name': 'pause', 'aliases': []},
'23ef': {'canonical_name': 'play_pause', 'aliases': []},
# stop taken by People/118
'23f9': {'canonical_name': 'stop_button', 'aliases': []},
'23fa': {'canonical_name': 'record', 'aliases': []},
'23ed': {'canonical_name': 'next_track', 'aliases': ['skip_forward']},
'23ee': {'canonical_name': 'previous_track', 'aliases': ['skip_back']},
'23e9': {'canonical_name': 'fast_forward', 'aliases': []},
'23ea': {'canonical_name': 'rewind', 'aliases': ['fast_reverse']},
'23eb': {'canonical_name': 'double_up', 'aliases': ['fast_up']},
'23ec': {'canonical_name': 'double_down', 'aliases': ['fast_down']},
'25c0': {'canonical_name': 'play_reverse', 'aliases': []},
'1f53c': {'canonical_name': 'upvote', 'aliases': ['up_button', 'increase']},
'1f53d': {'canonical_name': 'downvote', 'aliases': ['down_button', 'decrease']},
'27a1': {'canonical_name': 'right', 'aliases': ['east']},
'2b05': {'canonical_name': 'left', 'aliases': ['west']},
'2b06': {'canonical_name': 'up', 'aliases': ['north']},
'2b07': {'canonical_name': 'down', 'aliases': ['south']},
'2197': {'canonical_name': 'upper_right', 'aliases': ['north_east']},
'2198': {'canonical_name': 'lower_right', 'aliases': ['south_east']},
'2199': {'canonical_name': 'lower_left', 'aliases': ['south_west']},
'2196': {'canonical_name': 'upper_left', 'aliases': ['north_west']},
'2195': {'canonical_name': 'up_down', 'aliases': []},
'2194': {'canonical_name': 'left_right', 'aliases': ['swap']},
'21aa': {'canonical_name': 'forward', 'aliases': ['right_hook']},
'21a9': {'canonical_name': 'reply', 'aliases': ['left_hook']},
'2934': {'canonical_name': 'heading_up', 'aliases': []},
'2935': {'canonical_name': 'heading_down', 'aliases': []},
'1f500': {'canonical_name': 'shuffle', 'aliases': []},
'1f501': {'canonical_name': 'repeat', 'aliases': []},
'1f502': {'canonical_name': 'repeat_one', 'aliases': []},
'1f504': {'canonical_name': 'counterclockwise', 'aliases': ['return']},
'1f503': {'canonical_name': 'clockwise', 'aliases': []},
'1f3b5': {'canonical_name': 'music', 'aliases': []},
'1f3b6': {'canonical_name': 'musical_notes', 'aliases': []},
'2795': {'canonical_name': 'plus', 'aliases': ['add']},
'2796': {'canonical_name': 'minus', 'aliases': ['subtract']},
'2797': {'canonical_name': 'division', 'aliases': ['divide']},
'2716': {'canonical_name': 'multiplication', 'aliases': ['multiply']},
'1f4b2': {'canonical_name': 'dollars', 'aliases': []},
# There is no other exchange, so might as well generalize this
'1f4b1': {'canonical_name': 'exchange', 'aliases': []},
'2122': {'canonical_name': 'tm', 'aliases': ['trademark']},
'3030': {'canonical_name': 'wavy_dash', 'aliases': []},
'27b0': {'canonical_name': 'loop', 'aliases': []},
# https://emojipedia.org/double-curly-loop/
'27bf': {'canonical_name': 'double_loop', 'aliases': ['voicemail']},
'1f51a': {'canonical_name': 'end', 'aliases': []},
'1f519': {'canonical_name': 'back', 'aliases': []},
'1f51b': {'canonical_name': 'on', 'aliases': []},
'1f51d': {'canonical_name': 'top', 'aliases': []},
'1f51c': {'canonical_name': 'soon', 'aliases': []},
'2714': {'canonical_name': 'check_mark', 'aliases': []},
'2611': {'canonical_name': 'checkbox', 'aliases': []},
'1f518': {'canonical_name': 'radio_button', 'aliases': []},
'26aa': {'canonical_name': 'white_circle', 'aliases': []},
'26ab': {'canonical_name': 'black_circle', 'aliases': []},
'1f534': {'canonical_name': 'red_circle', 'aliases': []},
'1f535': {'canonical_name': 'blue_circle', 'aliases': []},
'1f53a': {'canonical_name': 'red_triangle_up', 'aliases': []},
'1f53b': {'canonical_name': 'red_triangle_down', 'aliases': []},
'1f538': {'canonical_name': 'small_orange_diamond', 'aliases': []},
'1f539': {'canonical_name': 'small_blue_diamond', 'aliases': []},
'1f536': {'canonical_name': 'large_orange_diamond', 'aliases': []},
'1f537': {'canonical_name': 'large_blue_diamond', 'aliases': []},
'1f533': {'canonical_name': 'black_and_white_square', 'aliases': []},
'1f532': {'canonical_name': 'white_and_black_square', 'aliases': []},
'25aa': {'canonical_name': 'black_small_square', 'aliases': []},
'25ab': {'canonical_name': 'white_small_square', 'aliases': []},
'25fe': {'canonical_name': 'black_medium_small_square', 'aliases': []},
'25fd': {'canonical_name': 'white_medium_small_square', 'aliases': []},
'25fc': {'canonical_name': 'black_medium_square', 'aliases': []},
'25fb': {'canonical_name': 'white_medium_square', 'aliases': []},
'2b1b': {'canonical_name': 'black_large_square', 'aliases': []},
'2b1c': {'canonical_name': 'white_large_square', 'aliases': []},
'1f508': {'canonical_name': 'speaker', 'aliases': []},
'1f507': {'canonical_name': 'mute', 'aliases': ['no_sound']},
'1f509': {'canonical_name': 'softer', 'aliases': []},
'1f50a': {'canonical_name': 'louder', 'aliases': ['sound']},
'1f514': {'canonical_name': 'notifications', 'aliases': ['bell']},
'1f515': {'canonical_name': 'mute_notifications', 'aliases': []},
'1f4e3': {'canonical_name': 'megaphone', 'aliases': ['shout']},
'1f4e2': {'canonical_name': 'loudspeaker', 'aliases': ['bullhorn']},
'1f4ac': {'canonical_name': 'umm', 'aliases': ['speech_balloon']},
'1f5e8': {'canonical_name': 'speech_bubble', 'aliases': []},
'1f4ad': {'canonical_name': 'thought', 'aliases': ['dream']},
'1f5ef': {'canonical_name': 'anger_bubble', 'aliases': []},
'2660': {'canonical_name': 'spades', 'aliases': []},
'2663': {'canonical_name': 'clubs', 'aliases': []},
'2665': {'canonical_name': 'hearts', 'aliases': []},
'2666': {'canonical_name': 'diamonds', 'aliases': []},
'1f0cf': {'canonical_name': 'joker', 'aliases': []},
'1f3b4': {'canonical_name': 'playing_cards', 'aliases': []},
'1f004': {'canonical_name': 'mahjong', 'aliases': []},
# The only use I can think of for so many clocks is to be able to use them
# to vote on times and such in emoji reactions. But a) the experience is
# not that great (the images are too small), b) there are issues with
# 24-hour time (used in many countries), like what is 00:30 or 01:00
# called, c) it's hard to make the compose typeahead experience great, and
# d) we should have a dedicated time voting widget that takes care of
# timezone and locale issues, and uses a digital representation.
# '1f550': {'canonical_name': 'X', 'aliases': ['clock1']},
# '1f551': {'canonical_name': 'X', 'aliases': ['clock2']},
# '1f552': {'canonical_name': 'X', 'aliases': ['clock3']},
# '1f553': {'canonical_name': 'X', 'aliases': ['clock4']},
# '1f554': {'canonical_name': 'X', 'aliases': ['clock5']},
# '1f555': {'canonical_name': 'X', 'aliases': ['clock6']},
# '1f556': {'canonical_name': 'X', 'aliases': ['clock7']},
# seems like the best choice for time
'1f557': {'canonical_name': 'time', 'aliases': ['clock']},
# '1f558': {'canonical_name': 'X', 'aliases': ['clock9']},
# '1f559': {'canonical_name': 'X', 'aliases': ['clock10']},
# '1f55a': {'canonical_name': 'X', 'aliases': ['clock11']},
# '1f55b': {'canonical_name': 'X', 'aliases': ['clock12']},
# '1f55c': {'canonical_name': 'X', 'aliases': ['clock130']},
# '1f55d': {'canonical_name': 'X', 'aliases': ['clock230']},
# '1f55e': {'canonical_name': 'X', 'aliases': ['clock330']},
# '1f55f': {'canonical_name': 'X', 'aliases': ['clock430']},
# '1f560': {'canonical_name': 'X', 'aliases': ['clock530']},
# '1f561': {'canonical_name': 'X', 'aliases': ['clock630']},
# '1f562': {'canonical_name': 'X', 'aliases': ['clock730']},
# '1f563': {'canonical_name': 'X', 'aliases': ['clock830']},
# '1f564': {'canonical_name': 'X', 'aliases': ['clock930']},
# '1f565': {'canonical_name': 'X', 'aliases': ['clock1030']},
# '1f566': {'canonical_name': 'X', 'aliases': ['clock1130']},
# '1f567': {'canonical_name': 'X', 'aliases': ['clock1230']},
'1f3f3': {'canonical_name': 'white_flag', 'aliases': ['surrender']},
'1f3f4': {'canonical_name': 'black_flag', 'aliases': []},
'1f3c1': {'canonical_name': 'checkered_flag', 'aliases': ['race', 'go', 'start']},
'1f6a9': {'canonical_name': 'triangular_flag', 'aliases': []},
# solidarity from iemoji
'1f38c': {'canonical_name': 'crossed_flags', 'aliases': ['solidarity']},
} # type: Dict[str, Dict[str, Any]]
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/setup/emoji/emoji_setup_utils.py | # This file contains various helper functions used by the `build_emoji` tool.
# See docs/subsystems/emoji.md for details on how this system works.
from collections import defaultdict
from typing import Any, Dict, List
# Emojisets that we currently support.
EMOJISETS = ['apple', 'emojione', 'google', 'twitter']
# Some image files in the old emoji farm had a different name than in the new emoji
# farm. `remapped_emojis` is a map that contains a mapping of their name in the old
# emoji farm to their name in the new emoji farm.
REMAPPED_EMOJIS = {
"0023": "0023-20e3", # Hash
"0030": "0030-20e3", # Zero
"0031": "0031-20e3", # One
"0032": "0032-20e3", # Two
"0033": "0033-20e3", # Three
"0034": "0034-20e3", # Four
"0035": "0035-20e3", # Five
"0036": "0036-20e3", # Six
"0037": "0037-20e3", # Seven
"0038": "0038-20e3", # Eight
"0039": "0039-20e3", # Nine
"1f1e8": "1f1e8-1f1f3", # cn
"1f1e9": "1f1e9-1f1ea", # de
"1f1ea": "1f1ea-1f1f8", # es
"1f1eb": "1f1eb-1f1f7", # fr
"1f1ec": "1f1ec-1f1e7", # gb/us
"1f1ee": "1f1ee-1f1f9", # it
"1f1ef": "1f1ef-1f1f5", # jp
"1f1f0": "1f1f0-1f1f7", # kr
"1f1f7": "1f1f7-1f1fa", # ru
"1f1fa": "1f1fa-1f1f8", # us
}
# Emoticons and which emoji they should become. Duplicate emoji are allowed.
# Changes here should be mimicked in `templates/zerver/help/enable-emoticon-translations.md`.
EMOTICON_CONVERSIONS = {
':)': ':slight_smile:',
'(:': ':slight_smile:',
':(': ':frown:',
'<3': ':heart:',
':|': ':expressionless:',
':/': ':confused:',
}
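# The real emoticon-translation logic lives elsewhere in the codebase; this
# illustrative helper (not used by `build_emoji`) just sketches how the table
# above could drive a simple, naive text pass.
def translate_emoticons_naively(text):
    # type: (str) -> str
    for emoticon, emoji_name in EMOTICON_CONVERSIONS.items():
        text = text.replace(emoticon, emoji_name)
    return text  # e.g. translate_emoticons_naively('hi :)') == 'hi :slight_smile:'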
def emoji_names_for_picker(emoji_name_maps: Dict[str, Dict[str, Any]]) -> List[str]:
emoji_names = [] # type: List[str]
for emoji_code, name_info in emoji_name_maps.items():
emoji_names.append(name_info["canonical_name"])
emoji_names.extend(name_info["aliases"])
return sorted(emoji_names)
def get_emoji_code(emoji_dict: Dict[str, Any]) -> str:
# Starting from version 4.0.0, the `emoji_datasource` package has started to
# add an emoji presentation variation selector for certain emojis which
# have defined variation sequences. In informal environments (like texting
# and chat) it is more appropriate for an emoji to have a colorful display,
# so until emoji characters gain a text presentation selector, we can
# continue using emoji characters without appending the emoji presentation
# selector.
# (http://unicode.org/reports/tr51/index.html#Presentation_Style)
# If the `non_qualified` field is present and not None, return it; otherwise
# return the `unified` field.
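# For example (hypothetical entries shaped like `emoji_datasource` rows,
# shown only to illustrate the precedence rule above):
#   get_emoji_code({'unified': '2764-FE0F', 'non_qualified': '2764'}) == '2764'
#   get_emoji_code({'unified': '1F408', 'non_qualified': None}) == '1f408'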
emoji_code = emoji_dict.get("non_qualified") or emoji_dict["unified"]
return emoji_code.lower()
# Returns a dict from categories to lists of codepoints. Each list of
# codepoints is sorted according to the `sort_order` defined in
# `emoji_data`.
def generate_emoji_catalog(emoji_data: List[Dict[str, Any]],
emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, List[str]]:
sort_order = {} # type: Dict[str, int]
emoji_catalog = defaultdict(list) # type: Dict[str, List[str]]
for emoji_dict in emoji_data:
emoji_code = get_emoji_code(emoji_dict)
if not emoji_is_universal(emoji_dict) or emoji_code not in emoji_name_maps:
continue
category = emoji_dict["category"]
sort_order[emoji_code] = emoji_dict["sort_order"]
emoji_catalog[category].append(emoji_code)
# Sort the emojis according to iamcal's sort order. This sorting determines the
# order in which emojis will be displayed in the emoji picker.
for category in emoji_catalog:
emoji_catalog[category].sort(key=lambda emoji_code: sort_order[emoji_code])
return dict(emoji_catalog)
# Use only those names for which images are present in all
# the emoji sets so that we can switch emoji sets seamlessly.
def emoji_is_universal(emoji_dict: Dict[str, Any]) -> bool:
for emoji_set in EMOJISETS:
if not emoji_dict['has_img_' + emoji_set]:
return False
return True
def generate_codepoint_to_name_map(emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
codepoint_to_name = {} # type: Dict[str, str]
for emoji_code, name_info in emoji_name_maps.items():
codepoint_to_name[emoji_code] = name_info["canonical_name"]
return codepoint_to_name
def generate_name_to_codepoint_map(emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
name_to_codepoint = {}
for emoji_code, name_info in emoji_name_maps.items():
canonical_name = name_info["canonical_name"]
aliases = name_info["aliases"]
name_to_codepoint[canonical_name] = emoji_code
for alias in aliases:
name_to_codepoint[alias] = emoji_code
return name_to_codepoint
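# A minimal smoke test of the helpers above, runnable as a script. The tiny
# `sample_name_map` is a made-up stand-in for the real emoji name map, used
# only to show how the generated maps relate to each other.
if __name__ == '__main__':
    sample_name_map = {
        '1f600': {'canonical_name': 'grinning', 'aliases': ['happy']},
        '2764': {'canonical_name': 'heart', 'aliases': ['love', 'love_you']},
    }  # type: Dict[str, Dict[str, Any]]
    assert emoji_names_for_picker(sample_name_map) == [
        'grinning', 'happy', 'heart', 'love', 'love_you']
    assert generate_codepoint_to_name_map(sample_name_map) == {
        '1f600': 'grinning', '2764': 'heart'}
    # Every canonical name and every alias maps back to its codepoint.
    assert generate_name_to_codepoint_map(sample_name_map)['love'] == '2764'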
| [
"Dict[str, Dict[str, Any]]",
"Dict[str, Any]",
"List[Dict[str, Any]]",
"Dict[str, Dict[str, Any]]",
"Dict[str, Any]",
"Dict[str, Dict[str, Any]]",
"Dict[str, Dict[str, Any]]"
] | [
1771,
2079,
3097,
3163,
4122,
4324,
4629
] | [
1796,
2093,
3117,
3188,
4136,
4349,
4654
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/setup/setup_venvs.py | #!/usr/bin/env python3
import os
import sys
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if ZULIP_PATH not in sys.path:
sys.path.append(ZULIP_PATH)
from scripts.lib.setup_venv import setup_virtualenv
from scripts.lib.zulip_tools import run, subprocess_text_output
VENV_PATH = "/srv/zulip-py3-venv"
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "dev.txt")
THUMBOR_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "thumbor.txt")
def main() -> None:
# Get the correct Python interpreter. If we don't do this and use
# `virtualenv -p python3` to create the venv in Travis, the venv
# starts referring to the system Python interpreter.
python_interpreter = subprocess_text_output(['which', 'python3'])
setup_virtualenv("/srv/zulip-thumbor-venv", THUMBOR_REQS_FILE,
patch_activate_script=True, virtualenv_args=['-p', 'python2.7'])
setup_virtualenv(VENV_PATH, DEV_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', python_interpreter])
if __name__ == "__main__":
main()
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/__init__.py | [] | [] | [] |
|
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/test_capitalization_checker.py | from bs4 import BeautifulSoup
from unittest import TestCase
from tools.lib.capitalization import check_capitalization, is_capitalized, \
get_safe_text
class GetSafeTextTestCase(TestCase):
def test_get_safe_text(self) -> None:
string = ('Messages in __page_params.product_name__ go to a '
'stream and have a topic.')
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Messages in __page_params_product_name__ '
'go to a stream and have a topic.')
string = "Zulip Zulip. Zulip some text!"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Zulip zulip. Zulip some text!')
string = "Zulip Zulip? Zulip some text!"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Zulip zulip? Zulip some text!')
string = "Zulip Zulip! Zulip some text!"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Zulip zulip! Zulip some text!')
string = "Zulip Zulip, Zulip some text!"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Zulip zulip, zulip some text!')
string = "Some text 25MiB"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Some text 25mib')
string = "Not Ignored Phrase"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Not Ignored Phrase')
string = "Not ignored phrase"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Not ignored phrase')
string = ""
safe_text = get_safe_text(string)
self.assertEqual(safe_text, '')
string = """
<p>Please re-enter your password to confirm your identity.
(<a href="/accounts/password/reset/" target="_blank">Forgotten it?</a>)</p>
"""
safe_text = get_safe_text(string)
soup = BeautifulSoup(safe_text, 'lxml')
rendered_text = ' '.join(soup.text.split())
self.assertEqual(safe_text, rendered_text)
string = "Edited (__last_edit_timestr__)"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, string)
string = "iPhone application"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'Iphone application')
string = "One two etc. three"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'One two etc_ three')
string = "One two etc. three. four"
safe_text = get_safe_text(string)
self.assertEqual(safe_text, 'One two etc_ three. four')
class IsCapitalizedTestCase(TestCase):
def test_process_text(self) -> None:
string = "Zulip zulip. Zulip some text!"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Zulip zulip? Zulip some text!"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Zulip zulip! Zulip some text!"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Zulip zulip, Zulip some text!"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Some number 25mib"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Not Ignored Phrase"
capitalized = is_capitalized(string)
self.assertFalse(capitalized)
string = "Not ignored phrase"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = ""
capitalized = is_capitalized(string)
self.assertFalse(capitalized)
string = ("Please re-enter your password to confirm your identity."
" (Forgotten it?)")
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Edited (__last_edit_timestr__)"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "Iphone application"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
string = "One two etc_ three"
capitalized = is_capitalized(string)
self.assertTrue(capitalized)
class CheckCapitalizationTestCase(TestCase):
def test_check_capitalization(self) -> None:
strings = ["Zulip Zulip. Zulip some text!",
"Zulip Zulip? Zulip some text!",
"Zulip Zulip! Zulip some text!",
"Zulip Zulip, Zulip some text!",
"Some number 25MiB",
"Not Ignored Phrase",
"Not ignored phrase",
"Some text with realm in it",
"Realm in capital case",
('<p class="bot-settings-note padded-container"> Looking for our '
'<a href="/integrations" target="_blank">Integrations</a> or '
'<a href="/api" target="_blank">API</a> '
'documentation? </p>'),
]
errored, ignored, banned = check_capitalization(strings)
self.assertEqual(errored, ['Not Ignored Phrase'])
self.assertEqual(
ignored,
sorted(["Zulip Zulip. Zulip some text!",
"Zulip Zulip? Zulip some text!",
"Zulip Zulip! Zulip some text!",
"Zulip Zulip, Zulip some text!",
"Some number 25MiB",
('<p class="bot-settings-note padded-container"> Looking '
'for our <a href="/integrations" target="_blank">'
'Integrations</a> or <a href="/api" '
'target="_blank">API</a> documentation? </p>'),
]))
self.assertEqual(banned,
sorted(["realm found in 'Some text with realm in it'. "
"The term realm should not appear in user-facing "
"strings. Use organization instead.",
"realm found in 'Realm in capital case'. "
"The term realm should not appear in user-facing "
"strings. Use organization instead.",
]))
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/test_css_parser.py |
from typing import cast, Any
import sys
import unittest
try:
from tools.lib.css_parser import (
CssParserException,
CssSection,
parse,
)
except ImportError:
print('ERROR!!! You need to run this via tools/test-tools.')
sys.exit(1)
class ParserTestHappyPath(unittest.TestCase):
def test_basic_parse(self) -> None:
my_selector = 'li.foo'
my_block = '''{
color: red;
}'''
my_css = my_selector + ' ' + my_block
res = parse(my_css)
self.assertEqual(res.text().strip(), 'li.foo {\n color: red;\n}')
section = cast(CssSection, res.sections[0])
block = section.declaration_block
self.assertEqual(block.text().strip(), '{\n color: red;\n}')
declaration = block.declarations[0]
self.assertEqual(declaration.css_property, 'color')
self.assertEqual(declaration.css_value.text().strip(), 'red')
def test_same_line_comment(self) -> None:
my_css = '''
li.hide {
display: none; /* comment here */
/* Not to be confused
with this comment */
color: green;
}'''
res = parse(my_css)
section = cast(CssSection, res.sections[0])
block = section.declaration_block
declaration = block.declarations[0]
self.assertIn('/* comment here */', declaration.text())
def test_no_semicolon(self) -> None:
my_css = '''
p { color: red }
'''
reformatted_css = 'p {\n color: red;\n}'
res = parse(my_css)
self.assertEqual(res.text().strip(), reformatted_css)
section = cast(CssSection, res.sections[0])
self.assertFalse(section.declaration_block.declarations[0].semicolon)
def test_empty_block(self) -> None:
my_css = '''
div {
}'''
error = 'Empty declaration'
with self.assertRaisesRegex(CssParserException, error):
parse(my_css)
def test_multi_line_selector(self) -> None:
my_css = '''
h1,
h2,
h3 {
top: 0
}'''
res = parse(my_css)
section = res.sections[0]
selectors = section.selector_list.selectors
self.assertEqual(len(selectors), 3)
def test_media_block(self) -> None:
my_css = '''
@media (max-width: 300px) {
h5 {
margin: 0;
}
}'''
res = parse(my_css)
self.assertEqual(len(res.sections), 1)
expected = '@media (max-width: 300px) {\n h5 {\n margin: 0;\n }\n}'
self.assertEqual(res.text().strip(), expected)
class ParserTestSadPath(unittest.TestCase):
'''
Use this class for tests that verify the parser will
appropriately choke on malformed CSS.
We prevent some things that are technically legal
in CSS, like having comments in the middle of a list
of selectors. Some of this is just for expediency;
some of this is to enforce consistent formatting.
'''
def _assert_error(self, my_css: str, error: str) -> None:
with self.assertRaisesRegex(CssParserException, error):
parse(my_css)
def test_unexpected_end_brace(self) -> None:
my_css = '''
@media (max-width: 975px) {
body {
color: red;
}
}} /* whoops */'''
error = 'unexpected }'
self._assert_error(my_css, error)
def test_empty_section(self) -> None:
my_css = '''
/* nothing to see here, move along */
'''
error = 'unexpected empty section'
self._assert_error(my_css, error)
def test_missing_colon(self) -> None:
my_css = '''
.hide
{
display none /* no colon here */
}'''
error = 'We expect a colon here'
self._assert_error(my_css, error)
def test_unclosed_comment(self) -> None:
my_css = ''' /* comment with no end'''
error = 'unclosed comment'
self._assert_error(my_css, error)
def test_missing_selectors(self) -> None:
my_css = '''
/* no selectors here */
{
bottom: 0
}'''
error = 'Missing selector'
self._assert_error(my_css, error)
def test_missing_value(self) -> None:
my_css = '''
h1
{
bottom:
}'''
error = 'Missing value'
self._assert_error(my_css, error)
def test_disallow_comments_in_selectors(self) -> None:
my_css = '''
h1,
h2, /* comment here not allowed by Zulip */
h3 {
top: 0
}'''
error = 'Comments in selector section are not allowed'
self._assert_error(my_css, error)
| [
"str",
"str"
] | [
3191,
3203
] | [
3194,
3206
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/test_html_branches.py |
import unittest
import os
import tools.lib.template_parser
from tools.lib.html_branches import (
get_tag_info,
html_branches,
html_tag_tree,
build_id_dict,
split_for_id_and_class,
)
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEST_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_template_data")
class TestHtmlBranches(unittest.TestCase):
def test_get_tag_info(self) -> None:
html = """
<p id="test" class="test1 test2">foo</p>
"""
start_tag, end_tag = tools.lib.template_parser.tokenize(html)
start_tag_info = get_tag_info(start_tag)
end_tag_info = get_tag_info(end_tag)
self.assertEqual(start_tag_info.text(), 'p.test1.test2#test')
self.assertEqual(end_tag_info.text(), 'p')
def test_html_tag_tree(self) -> None:
html = """
<!-- test -->
<!DOCTYPE html>
<html>
<!-- test -->
<head>
<title>Test</title>
<meta charset="utf-8" />
<link rel="stylesheet" href="style.css" />
</head>
<body>
<p>Hello<br />world!</p>
<p>Goodbye<!-- test -->world!</p>
</body>
</html>
<!-- test -->
"""
tree = html_tag_tree(html)
self.assertEqual(tree.children[0].token.kind, 'html_start')
self.assertEqual(tree.children[0].token.tag, 'html')
self.assertEqual(tree.children[0].children[0].token.kind, 'html_start')
self.assertEqual(tree.children[0].children[0].token.tag, 'head')
self.assertEqual(tree.children[0].children[0].children[0].token.kind, 'html_start')
self.assertEqual(tree.children[0].children[0].children[0].token.tag, 'title')
self.assertEqual(tree.children[0].children[1].token.kind, 'html_start')
self.assertEqual(tree.children[0].children[1].token.tag, 'body')
self.assertEqual(tree.children[0].children[1].children[0].token.kind, 'html_start')
self.assertEqual(tree.children[0].children[1].children[0].token.tag, 'p')
self.assertEqual(tree.children[0].children[1].children[0].children[0].token.kind, 'html_singleton')
self.assertEqual(tree.children[0].children[1].children[0].children[0].token.tag, 'br')
self.assertEqual(tree.children[0].children[1].children[1].token.kind, 'html_start')
self.assertEqual(tree.children[0].children[1].children[1].token.tag, 'p')
def test_html_branches(self) -> None:
html = """
<!-- test -->
<!DOCTYPE html>
<html>
<!-- test -->
<head>
<title>Test</title>
<meta charset="utf-8" />
<link rel="stylesheet" href="style.css" />
</head>
<body>
<p>Hello<br />world!</p>
<p>Goodbye<!-- test -->world!</p>
</body>
</html>
<!-- test -->
"""
branches = html_branches(html)
self.assertEqual(branches[0].text(), 'html head title')
self.assertEqual(branches[1].text(), 'html body p br')
self.assertEqual(branches[2].text(), 'html body p')
self.assertEqual(branches[0].staircase_text(), '\n html\n head\n title\n')
self.assertEqual(branches[1].staircase_text(), '\n html\n body\n p\n br\n')
self.assertEqual(branches[2].staircase_text(), '\n html\n body\n p\n')
def test_build_id_dict(self) -> None:
templates = ["test_template1.html", "test_template2.html"]
templates = [os.path.join(TEST_TEMPLATES_DIR, fn) for fn in templates]
template_id_dict = build_id_dict(templates)
self.assertEqual(set(template_id_dict.keys()), {'below_navbar', 'hello_{{ message }}', 'intro'})
self.assertEqual(template_id_dict['hello_{{ message }}'], [
'Line 12:%s/tools/tests/test_template_data/test_template1.html' % (ZULIP_PATH),
'Line 12:%s/tools/tests/test_template_data/test_template2.html' % (ZULIP_PATH)])
self.assertEqual(template_id_dict['intro'], [
'Line 10:%s/tools/tests/test_template_data/test_template1.html' % (ZULIP_PATH),
'Line 11:%s/tools/tests/test_template_data/test_template1.html' % (ZULIP_PATH),
'Line 11:%s/tools/tests/test_template_data/test_template2.html' % (ZULIP_PATH)])
self.assertEqual(template_id_dict['below_navbar'], [
'Line 10:%s/tools/tests/test_template_data/test_template2.html' % (ZULIP_PATH)])
def test_split_for_id_and_class(self) -> None:
id1 = "{{ red|blue }}"
id2 = "search_box_{{ page }}"
class1 = "chat_box message"
class2 = "stream_{{ topic }}"
class3 = "foo {{ a|b|c }} bar"
self.assertEqual(split_for_id_and_class(id1), ['{{ red|blue }}'])
self.assertEqual(split_for_id_and_class(id2), ['search_box_{{ page }}'])
self.assertEqual(split_for_id_and_class(class1), ['chat_box', 'message'])
self.assertEqual(split_for_id_and_class(class2), ['stream_{{ topic }}'])
self.assertEqual(split_for_id_and_class(class3), ['foo', '{{ a|b|c }}', 'bar'])
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/test_linter_custom_check.py | import os
from mock import patch
from unittest import TestCase
from typing import Any, Dict, List
from tools.linter_lib.custom_check import build_custom_checkers
from tools.linter_lib.custom_check import custom_check_file
ROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
CHECK_MESSAGE = "Fix the corresponding rule in `tools/linter_lib/custom_check.py`."
class TestCustomRules(TestCase):
def setUp(self) -> None:
self.all_rules = [] # type: List[Dict[str, Any]]
with patch('tools.linter_lib.custom_check.custom_check_file', return_value=False) as mock_custom_check_file:
by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html',
'json', 'md', 'txt', 'text', 'yaml', 'rst'],
['foo/bar.baz'])
check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)
check_custom_checks_py()
check_custom_checks_nonpy()
for call_args in mock_custom_check_file.call_args_list:
rule_set = call_args[0][2]
self.all_rules.extend(rule_set)
def test_paths_in_rules(self) -> None:
"""Verifies that the paths mentioned in linter rules actually exist"""
for rule in self.all_rules:
for path in rule.get('exclude', {}):
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.exists(abs_path),
"'{}' is neither an existing file, nor a directory. {}".format(path, CHECK_MESSAGE))
for line_tuple in rule.get('exclude_line', {}):
path = line_tuple[0]
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.isfile(abs_path),
"The file '{}' doesn't exist. {}".format(path, CHECK_MESSAGE))
for path in rule.get('include_only', {}):
if not os.path.splitext(path)[1]:
self.assertTrue(path.endswith('/'),
"The path '{}' should end with '/'. {}".format(path, CHECK_MESSAGE))
def test_rule_patterns(self) -> None:
"""Verifies that the search regex specified in a custom rule actually matches
the expectation and doesn't throw false positives."""
for rule in self.all_rules:
pattern = rule['pattern']
for line in rule.get('good_lines', []):
# create=True is superfluous when mocking built-ins in Python >= 3.5
with patch('builtins.open', return_value=iter((line+'\n\n').splitlines()), create=True, autospec=True):
self.assertFalse(custom_check_file('foo.bar', 'baz', [rule], ''),
"The pattern '{}' matched the line '{}' while it shouldn't.".format(pattern, line))
for line in rule.get('bad_lines', []):
# create=True is superfluous when mocking built-ins in Python >= 3.5
with patch('builtins.open',
return_value=iter((line+'\n\n').splitlines()), create=True, autospec=True), patch('builtins.print'):
filename = list(rule.get('include_only', {'foo.bar'}))[0]
self.assertTrue(custom_check_file(filename, 'baz', [rule], ''),
"The pattern '{}' didn't match the line '{}' while it should.".format(pattern, line))
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/test_pretty_print.py | import unittest
from tools.lib.pretty_print import pretty_print_html
# Note that GOOD_HTML isn't necessarily beautiful HTML. Apart
# from adjusting indentation, we mostly leave things alone to
# respect whatever line-wrapping styles were in place before.
BAD_HTML = """
<!-- test -->
<!DOCTYPE html>
<html>
<!-- test -->
<head>
<title>Test</title>
<meta charset="utf-8" />
<link rel="stylesheet" href="style.css" />
</head>
<body>
<div><p>Hello<br />world!</p></div>
<p>Goodbye<!-- test -->world!</p>
<table>
<tr>
<td>5</td>
</tr>
</table>
<pre>
print 'hello world'
</pre>
<div class = "foo"
id = "bar"
role = "whatever">{{ bla }}</div>
</body>
</html>
<!-- test -->
"""
GOOD_HTML = """
<!-- test -->
<!DOCTYPE html>
<html>
<!-- test -->
<head>
<title>Test</title>
<meta charset="utf-8" />
<link rel="stylesheet" href="style.css" />
</head>
<body>
<div><p>Hello<br />world!</p></div>
<p>Goodbye<!-- test -->world!</p>
<table>
<tr>
<td>5</td>
</tr>
</table>
<pre>
print 'hello world'
</pre>
<div class = "foo"
id = "bar"
role = "whatever">{{ bla }}</div>
</body>
</html>
<!-- test -->
"""
BAD_HTML1 = """
<html>
<body>
foobarfoobarfoo<b>bar</b>
</body>
</html>
"""
GOOD_HTML1 = """
<html>
<body>
foobarfoobarfoo<b>bar</b>
</body>
</html>
"""
BAD_HTML2 = """
<html>
<body>
{{# foobar area}}
foobarfoobarfoo<b>bar</b>
{{/ foobar area}}
</body>
</html>
"""
GOOD_HTML2 = """
<html>
<body>
{{# foobar area}}
foobarfoobarfoo<b>bar</b>
{{/ foobar area}}
</body>
</html>
"""
BAD_HTML3 = """
<html>
<body>
{{# foobar area}}
foobarfoobar<blockquote>
<p>
FOOBAR
</p>
</blockquote>
{{/ foobar area}}
</body>
</html>
"""
GOOD_HTML3 = """
<html>
<body>
{{# foobar area}}
foobarfoobar<blockquote>
<p>
FOOBAR
</p>
</blockquote>
{{/ foobar area}}
</body>
</html>
"""
BAD_HTML4 = """
<div>
foo
<p>hello</p>
bar
</div>
"""
GOOD_HTML4 = """
<div>
foo
<p>hello</p>
bar
</div>
"""
BAD_HTML5 = """
<div>
foo
{{#if foobar}}
hello
{{else}}
bye
{{/if}}
bar
</div>
"""
GOOD_HTML5 = """
<div>
foo
{{#if foobar}}
hello
{{else}}
bye
{{/if}}
bar
</div>
"""
BAD_HTML6 = """
<div>
<p> <strong> <span class = "whatever">foobar </span> </strong></p>
</div>
"""
GOOD_HTML6 = """
<div>
<p> <strong> <span class = "whatever">foobar </span> </strong></p>
</div>
"""
BAD_HTML7 = """
<div class="foobar">
<input type="foobar" name="temp" value="{{dyn_name}}"
{{#unless invite_only}}checked="checked"{{/unless}} /> {{dyn_name}}
{{#if invite_only}}<i class="fa fa-lock"></i>{{/if}}
</div>
"""
GOOD_HTML7 = """
<div class="foobar">
<input type="foobar" name="temp" value="{{dyn_name}}"
{{#unless invite_only}}checked="checked"{{/unless}} /> {{dyn_name}}
{{#if invite_only}}<i class="fa fa-lock"></i>{{/if}}
</div>
"""
BAD_HTML8 = """
{{#each test}}
{{#with this}}
{{#if foobar}}
<div class="anything">{{{test}}}</div>
{{/if}}
{{#if foobar2}}
{{partial "teststuff"}}
{{/if}}
{{/with}}
{{/each}}
"""
GOOD_HTML8 = """
{{#each test}}
{{#with this}}
{{#if foobar}}
<div class="anything">{{{test}}}</div>
{{/if}}
{{#if foobar2}}
{{partial "teststuff"}}
{{/if}}
{{/with}}
{{/each}}
"""
BAD_HTML9 = """
<form id="foobar" class="whatever">
{{! <div class="anothertest"> }}
<input value="test" />
<button type="button"><i class="test"></i></button>
<button type="button"><i class="test"></i></button>
{{! </div> }}
<div class="test"></div>
</form>
"""
GOOD_HTML9 = """
<form id="foobar" class="whatever">
{{! <div class="anothertest"> }}
<input value="test" />
<button type="button"><i class="test"></i></button>
<button type="button"><i class="test"></i></button>
{{! </div> }}
<div class="test"></div>
</form>
"""
BAD_HTML10 = """
{% block portico_content %}
<div class="test">
<i class='test'></i> foobar
</div>
<div class="test1">
{% for row in data %}
<div class="test2">
{% for group in (row[0:2], row[2:4]) %}
<div class="test2">
</div>
{% endfor %}
</div>
{% endfor %}
</div>
{% endblock %}
"""
GOOD_HTML10 = """
{% block portico_content %}
<div class="test">
<i class='test'></i> foobar
</div>
<div class="test1">
{% for row in data %}
<div class="test2">
{% for group in (row[0:2], row[2:4]) %}
<div class="test2">
</div>
{% endfor %}
</div>
{% endfor %}
</div>
{% endblock %}
"""
BAD_HTML11 = """
<div class="test1">
<div class="test2">
foobar
<div class="test2">
</div>
</div>
</div>
"""
GOOD_HTML11 = """
<div class="test1">
<div class="test2">
foobar
<div class="test2">
</div>
</div>
</div>
"""
BAD_HTML12 = """
<div class="test1">
<pre>
<div class="test2">
foobar
<div class="test2">
</div>
</div>
</pre>
</div>
"""
GOOD_HTML12 = """
<div class="test1">
<pre>
<div class="test2">
foobar
<div class="test2">
</div>
</div>
</pre>
</div>
"""
BAD_HTML13 = """
<div>
{{#if this.code}}
<div> :{{this.name}}:</div>
{{else}}
{{#if this.is_realm_emoji}}
<img src="{{this.url}}" class="emoji" />
{{else}}
<div/>
{{/if}}
{{/if}}
<div>{{this.count}}</div>
</div>
"""
GOOD_HTML13 = """
<div>
{{#if this.code}}
<div> :{{this.name}}:</div>
{{else}}
{{#if this.is_realm_emoji}}
<img src="{{this.url}}" class="emoji" />
{{else}}
<div/>
{{/if}}
{{/if}}
<div>{{this.count}}</div>
</div>
"""
BAD_HTML14 = """
<div>
{{#if this.code}}
<pre>Here goes some cool code.</pre>
{{else}}
<div>
content of first div
<div>
content of second div.
</div>
</div>
{{/if}}
</div>
"""
GOOD_HTML14 = """
<div>
{{#if this.code}}
<pre>Here goes some cool code.</pre>
{{else}}
<div>
content of first div
<div>
content of second div.
</div>
</div>
{{/if}}
</div>
"""
BAD_HTML15 = """
<div>
<img alt=":thumbs_up:"
class="emoji"
src="/path/to/png"
title=":thumbs_up:"/>
<img alt=":thumbs_up:"
class="emoji"
src="/path/to/png"
title=":thumbs_up:"/>
<img alt=":thumbs_up:"
title=":thumbs_up:"/>
</div>
"""
GOOD_HTML15 = """
<div>
<img alt=":thumbs_up:"
class="emoji"
src="/path/to/png"
title=":thumbs_up:"/>
<img alt=":thumbs_up:"
class="emoji"
src="/path/to/png"
title=":thumbs_up:"/>
<img alt=":thumbs_up:"
title=":thumbs_up:"/>
</div>
"""
BAD_HTML16 = """
<div>
{{partial "settings_checkbox"
"setting_name" "realm_name_in_notifications"
"is_checked" page_params.realm_name_in_notifications
"label" settings_label.realm_name_in_notifications}}
</div>
"""
GOOD_HTML16 = """
<div>
{{partial "settings_checkbox"
"setting_name" "realm_name_in_notifications"
"is_checked" page_params.realm_name_in_notifications
"label" settings_label.realm_name_in_notifications}}
</div>
"""
BAD_HTML17 = """
<div>
<button type="button"
class="btn btn-primary btn-small">{{t "Yes" }}</button>
<button type="button"
id="confirm_btn"
class="btn btn-primary btn-small">{{t "Yes" }}</button>
<div class = "foo"
id = "bar"
role = "whatever">
{{ bla }}
</div>
</div>
"""
GOOD_HTML17 = """
<div>
<button type="button"
class="btn btn-primary btn-small">{{t "Yes" }}</button>
<button type="button"
id="confirm_btn"
class="btn btn-primary btn-small">{{t "Yes" }}</button>
<div class = "foo"
id = "bar"
role = "whatever">
{{ bla }}
</div>
</div>
"""
class TestPrettyPrinter(unittest.TestCase):
def compare(self, a: str, b: str) -> None:
self.assertEqual(a.split('\n'), b.split('\n'))
def test_pretty_print(self) -> None:
self.compare(pretty_print_html(GOOD_HTML), GOOD_HTML)
self.compare(pretty_print_html(BAD_HTML), GOOD_HTML)
self.compare(pretty_print_html(BAD_HTML1), GOOD_HTML1)
self.compare(pretty_print_html(BAD_HTML2), GOOD_HTML2)
self.compare(pretty_print_html(BAD_HTML3), GOOD_HTML3)
self.compare(pretty_print_html(BAD_HTML4), GOOD_HTML4)
self.compare(pretty_print_html(BAD_HTML5), GOOD_HTML5)
self.compare(pretty_print_html(BAD_HTML6), GOOD_HTML6)
self.compare(pretty_print_html(BAD_HTML7), GOOD_HTML7)
self.compare(pretty_print_html(BAD_HTML8), GOOD_HTML8)
self.compare(pretty_print_html(BAD_HTML9), GOOD_HTML9)
self.compare(pretty_print_html(BAD_HTML10), GOOD_HTML10)
self.compare(pretty_print_html(BAD_HTML11), GOOD_HTML11)
self.compare(pretty_print_html(BAD_HTML12), GOOD_HTML12)
self.compare(pretty_print_html(BAD_HTML13), GOOD_HTML13)
self.compare(pretty_print_html(BAD_HTML14), GOOD_HTML14)
self.compare(pretty_print_html(BAD_HTML15), GOOD_HTML15)
self.compare(pretty_print_html(BAD_HTML16), GOOD_HTML16)
self.compare(pretty_print_html(BAD_HTML17), GOOD_HTML17)
| [
"str",
"str"
] | [
8328,
8336
] | [
8331,
8339
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/tests/test_template_parser.py | from typing import Optional, Any
import sys
import unittest
try:
from tools.lib.template_parser import (
TemplateParserException,
is_django_block_tag,
tokenize,
validate,
)
except ImportError:
print('ERROR!!! You need to run this via tools/test-tools.')
sys.exit(1)
class ParserTest(unittest.TestCase):
def _assert_validate_error(self, error: str, fn: Optional[str]=None,
text: Optional[str]=None, check_indent: bool=True) -> None:
with self.assertRaisesRegex(TemplateParserException, error):
validate(fn=fn, text=text, check_indent=check_indent)
def test_is_django_block_tag(self) -> None:
self.assertTrue(is_django_block_tag('block'))
self.assertFalse(is_django_block_tag('not a django tag'))
def test_validate_vanilla_html(self) -> None:
'''
Verify that validate() does not raise errors for
well-formed HTML.
'''
my_html = '''
<table>
<tr>
<td>foo</td>
</tr>
</table>'''
validate(text=my_html)
def test_validate_handlebars(self) -> None:
my_html = '''
{{#with stream}}
<p>{{stream}}</p>
{{/with}}
'''
validate(text=my_html)
def test_validate_comment(self) -> None:
my_html = '''
<!---
<h1>foo</h1>
-->'''
validate(text=my_html)
def test_validate_django(self) -> None:
my_html = '''
{% include "some_other.html" %}
{% if foo %}
<p>bar</p>
{% endif %}
'''
validate(text=my_html)
my_html = '''
{% block "content" %}
{% with className="class" %}
{% include 'foobar' %}
{% endwith %}
{% endblock %}
'''
validate(text=my_html)
def test_validate_no_start_tag(self) -> None:
my_html = '''
foo</p>
'''
self._assert_validate_error('No start tag', text=my_html)
def test_validate_mismatched_tag(self) -> None:
my_html = '''
<b>foo</i>
'''
self._assert_validate_error('Mismatched tag.', text=my_html)
def test_validate_bad_indentation(self) -> None:
my_html = '''
<p>
foo
</p>
'''
self._assert_validate_error('Bad indentation.', text=my_html, check_indent=True)
def test_validate_state_depth(self) -> None:
my_html = '''
<b>
'''
self._assert_validate_error('Missing end tag', text=my_html)
def test_validate_incomplete_handlebars_tag_1(self) -> None:
my_html = '''
{{# foo
'''
self._assert_validate_error('''Tag missing "}}" at Line 2 Col 13:"{{# foo
"''', text=my_html)
def test_validate_incomplete_handlebars_tag_2(self) -> None:
my_html = '''
{{# foo }
'''
self._assert_validate_error('Tag missing "}}" at Line 2 Col 13:"{{# foo }\n"', text=my_html)
def test_validate_incomplete_django_tag_1(self) -> None:
my_html = '''
{% foo
'''
self._assert_validate_error('''Tag missing "%}" at Line 2 Col 13:"{% foo
"''', text=my_html)
def test_validate_incomplete_django_tag_2(self) -> None:
my_html = '''
{% foo %
'''
self._assert_validate_error('Tag missing "%}" at Line 2 Col 13:"{% foo %\n"', text=my_html)
def test_validate_incomplete_html_tag_1(self) -> None:
my_html = '''
<b
'''
self._assert_validate_error('''Tag missing ">" at Line 2 Col 13:"<b
"''', text=my_html)
def test_validate_incomplete_html_tag_2(self) -> None:
my_html = '''
<a href="
'''
my_html1 = '''
<a href=""
'''
self._assert_validate_error('''Tag missing ">" at Line 2 Col 13:"<a href=""
"''', text=my_html1)
self._assert_validate_error('''Unbalanced Quotes at Line 2 Col 13:"<a href="
"''', text=my_html)
def test_validate_empty_html_tag(self) -> None:
my_html = '''
< >
'''
self._assert_validate_error('Tag name missing', text=my_html)
def test_code_blocks(self) -> None:
# This is fine.
my_html = '''
<code>
x = 5
y = x + 1
</code>'''
validate(text=my_html)
# This is also fine.
my_html = "<code>process_widgets()</code>"
validate(text=my_html)
# This is illegal.
my_html = '''
<code>x =
5</code>
'''
self._assert_validate_error('Code tag is split across two lines.', text=my_html)
def test_anchor_blocks(self) -> None:
# This is allowed, although strange.
my_html = '''
<a hef="/some/url">
Click here
for more info.
</a>'''
validate(text=my_html)
# This is fine.
my_html = '<a href="/some/url">click here</a>'
validate(text=my_html)
# Even this is fine.
my_html = '''
<a class="twitter-timeline" href="https://twitter.com/ZulipStatus"
data-widget-id="443457763394334720"
data-screen-name="ZulipStatus"
>@ZulipStatus on Twitter</a>.
'''
validate(text=my_html)
def test_tokenize(self) -> None:
tag = '<meta whatever>bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'html_special')
tag = '<a>bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'html_start')
self.assertEqual(token.tag, 'a')
tag = '<br />bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'html_singleton')
self.assertEqual(token.tag, 'br')
tag = '<input>bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'html_singleton')
self.assertEqual(token.tag, 'input')
tag = '<input />bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'html_singleton')
self.assertEqual(token.tag, 'input')
tag = '</a>bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'html_end')
self.assertEqual(token.tag, 'a')
tag = '{{#with foo}}bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'handlebars_start')
self.assertEqual(token.tag, 'with')
tag = '{{/with}}bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'handlebars_end')
self.assertEqual(token.tag, 'with')
tag = '{% if foo %}bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'django_start')
self.assertEqual(token.tag, 'if')
tag = '{% endif %}bla'
token = tokenize(tag)[0]
self.assertEqual(token.kind, 'django_end')
self.assertEqual(token.tag, 'if')
| [
"str"
] | [
398
] | [
401
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/zulint/__init__.py | [] | [] | [] |
|
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/zulint/command.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import argparse
import logging
import os
import subprocess
import sys
from typing import Any, Callable, Dict, List, Optional
from zulint.printer import print_err, colors
def add_default_linter_arguments(parser):
# type: (argparse.ArgumentParser) -> None
parser.add_argument('--modified', '-m',
action='store_true',
help='Only check modified files')
parser.add_argument('--verbose', '-v',
action='store_true',
help='Print verbose timing output')
parser.add_argument('targets',
nargs='*',
help='Specify directories to check')
def run_parallel(lint_functions):
# type: (Dict[str, Callable[[], int]]) -> bool
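    # Fork one child process per lint function; each child runs its
    # function and exits with the function's return code.  The parent
    # waits on all children and reports failure if any exited non-zero.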
pids = []
for name, func in lint_functions.items():
pid = os.fork()
if pid == 0:
logging.info("start " + name)
result = func()
logging.info("finish " + name)
sys.stdout.flush()
sys.stderr.flush()
os._exit(result)
pids.append(pid)
failed = False
for pid in pids:
(_, status) = os.waitpid(pid, 0)
if status != 0:
failed = True
return failed
class LinterConfig:
lint_functions = {} # type: Dict[str, Callable[[], int]]
def __init__(self, by_lang):
# type: (Any) -> None
self.by_lang = by_lang
def lint(self, func):
# type: (Callable[[], int]) -> Callable[[], int]
self.lint_functions[func.__name__] = func
return func
def external_linter(self, name, command, target_langs=[]):
# type: (str, List[str], List[str]) -> None
"""Registers an external linter program to be run as part of the
linter. This program will be passed the subset of files being
linted that have extensions in target_langs. If there are no
such files, exits without doing anything.
If target_langs is empty, just runs the linter unconditionally.
"""
color = next(colors)
def run_linter():
# type: () -> int
targets = [] # type: List[str]
if len(target_langs) != 0:
targets = [target for lang in target_langs for target in self.by_lang[lang]]
if len(targets) == 0:
# If this linter has a list of languages, and
# no files in those languages are to be checked,
# then we can safely return success without
# invoking the external linter.
return 0
p = subprocess.Popen(command + targets,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
assert p.stdout # use of subprocess.PIPE indicates non-None
for line in iter(p.stdout.readline, b''):
print_err(name, color, line)
return p.wait() # Linter exit code
self.lint_functions[name] = run_linter
def do_lint(self):
# type: () -> None
failed = run_parallel(self.lint_functions)
sys.exit(1 if failed else 0)
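
# A minimal usage sketch of LinterConfig (the file lists, linter names and
# commands below are hypothetical):
#
#     by_lang = {'py': ['foo.py'], 'sh': []}
#     linter_config = LinterConfig(by_lang)
#     linter_config.external_linter('mypy', ['mypy'], ['py'])
#
#     @linter_config.lint
#     def custom_py_check():
#         # type: () -> int
#         return 0  # 0 means this check passed
#
#     linter_config.do_lint()  # runs all checks in parallel, then sys.exit()s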
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/zulint/linters.py | from __future__ import print_function
from __future__ import absolute_import
import subprocess
from typing import List
from zulint.printer import print_err, colors
def run_pycodestyle(files, ignored_rules):
# type: (List[str], List[str]) -> bool
if len(files) == 0:
return False
failed = False
color = next(colors)
pep8 = subprocess.Popen(
['pycodestyle'] + files + ['--ignore={rules}'.format(rules=','.join(ignored_rules))],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
assert pep8.stdout is not None # Implied by use of subprocess.PIPE
for line in iter(pep8.stdout.readline, b''):
print_err('pep8', color, line)
failed = True
return failed
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/zulint/lister.py | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import subprocess
import re
from collections import defaultdict
import argparse
from six.moves import filter
from typing import Union, List, Dict
def get_ftype(fpath, use_shebang):
# type: (str, bool) -> str
ext = os.path.splitext(fpath)[1]
if ext:
return ext[1:]
elif use_shebang:
# opening a file may throw an OSError
with open(fpath) as f:
first_line = f.readline()
if re.search(r'^#!.*\bpython', first_line):
return 'py'
elif re.search(r'^#!.*sh', first_line):
return 'sh'
elif re.search(r'^#!.*\bperl', first_line):
return 'pl'
elif re.search(r'^#!.*\bnode', first_line):
return 'js'
elif re.search(r'^#!.*\bruby', first_line):
return 'rb'
elif re.search(r'^#!', first_line):
print('Error: Unknown shebang in file "%s":\n%s' % (fpath, first_line), file=sys.stderr)
return ''
else:
return ''
else:
return ''
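
# A quick sketch of how get_ftype behaves (the paths are hypothetical):
#
#     get_ftype('tools/run-dev.py', True)   # -> 'py', from the extension
#     get_ftype('tools/provision', True)    # -> 'py', if the file starts with
#                                           #    a shebang like '#!/usr/bin/env python3'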
def list_files(targets: List[str]=[], ftypes: List[str]=[], use_shebang: bool=True,
modified_only: bool=False, exclude: List[str]=[], group_by_ftype: bool=False,
extless_only: bool=False) -> Union[Dict[str, List[str]], List[str]]:
"""
List files tracked by git.
Returns a list of files which are either in targets or in directories in targets.
    If targets is [], a list of all tracked files in the current directory is returned.
Other arguments:
ftypes - List of file types on which to filter the search.
If ftypes is [], all files are included.
use_shebang - Determine file type of extensionless files from their shebang.
modified_only - Only include files which have been modified.
exclude - List of files or directories to be excluded, relative to repository root.
group_by_ftype - If True, returns a dict of lists keyed by file type.
If False, returns a flat list of files.
extless_only - Only include extensionless files in output.
"""
ftypes = [x.strip('.') for x in ftypes]
ftypes_set = set(ftypes)
# Really this is all bytes -- it's a file path -- but we get paths in
# sys.argv as str, so that battle is already lost. Settle for hoping
# everything is UTF-8.
repository_root = subprocess.check_output(['git', 'rev-parse',
'--show-toplevel']).strip().decode('utf-8')
exclude_abspaths = [os.path.abspath(os.path.join(repository_root, fpath)) for fpath in exclude]
cmdline = ['git', 'ls-files'] + targets
if modified_only:
cmdline.append('-m')
files_gen = (x.strip() for x in subprocess.check_output(cmdline, universal_newlines=True).split('\n'))
# throw away empty lines and non-files (like symlinks)
files = list(filter(os.path.isfile, files_gen))
result_dict = defaultdict(list) # type: Dict[str, List[str]]
result_list = [] # type: List[str]
for fpath in files:
# this will take a long time if exclude is very large
ext = os.path.splitext(fpath)[1]
if extless_only and ext:
continue
absfpath = os.path.abspath(fpath)
if any(absfpath == expath or absfpath.startswith(os.path.abspath(expath) + os.sep)
for expath in exclude_abspaths):
continue
if ftypes or group_by_ftype:
try:
filetype = get_ftype(fpath, use_shebang)
except (OSError, UnicodeDecodeError) as e:
etype = e.__class__.__name__
print('Error: %s while determining type of file "%s":' % (etype, fpath), file=sys.stderr)
print(e, file=sys.stderr)
filetype = ''
if ftypes and filetype not in ftypes_set:
continue
if group_by_ftype:
result_dict[filetype].append(fpath)
else:
result_list.append(fpath)
if group_by_ftype:
return result_dict
else:
return result_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="List files tracked by git and optionally filter by type")
parser.add_argument('targets', nargs='*', default=[],
help='''files and directories to include in the result.
If this is not specified, the current directory is used''')
parser.add_argument('-m', '--modified', action='store_true', default=False,
help='list only modified files')
parser.add_argument('-f', '--ftypes', nargs='+', default=[],
help="list of file types to filter on. "
"All files are included if this option is absent")
parser.add_argument('--ext-only', dest='extonly', action='store_true', default=False,
help='only use extension to determine file type')
parser.add_argument('--exclude', nargs='+', default=[],
help='list of files and directories to exclude from results, relative to repo root')
parser.add_argument('--extless-only', dest='extless_only', action='store_true', default=False,
help='only include extensionless files in output')
args = parser.parse_args()
listing = list_files(targets=args.targets, ftypes=args.ftypes, use_shebang=not args.extonly,
modified_only=args.modified, exclude=args.exclude, extless_only=args.extless_only)
for l in listing:
print(l)
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | tools/zulint/printer.py | from __future__ import print_function
from __future__ import absolute_import
import sys
import os
from itertools import cycle
from typing import Union, Text
# Terminal color codes for use in differentiating linters
BOLDRED = '\x1B[1;31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
BLUE = '\x1b[34m'
MAGENTA = '\x1b[35m'
CYAN = '\x1b[36m'
ENDC = '\033[0m'
colors = cycle([GREEN, YELLOW, BLUE, MAGENTA, CYAN])
def print_err(name, color, line):
# type: (str, str, Union[Text, bytes]) -> None
# Decode with UTF-8 if in Python 3 and `line` is of bytes type.
# (Python 2 does this automatically)
if sys.version_info[0] == 3 and isinstance(line, bytes):
line = line.decode('utf-8')
print('{}{}{}|{end} {}{}{end}'.format(
color,
name,
' ' * max(0, 10 - len(name)),
BOLDRED,
line.rstrip(),
end=ENDC)
)
# Python 2's print function does not have a `flush` option.
sys.stdout.flush()
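
# A small usage sketch (the linter name and message are hypothetical):
#
#     color = next(colors)
#     print_err('pep8', color, b'foo.py:1:1: E101 indentation contains mixed spaces and tabs')
#
# This prints 'pep8' padded to ten columns in the given color, a '|'
# separator, and the message itself in bold red.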
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | version.py | ZULIP_VERSION = "1.9.0+git"
LATEST_MAJOR_VERSION = "1.9"
LATEST_RELEASE_VERSION = "1.9.0"
LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2018/11/07/zulip-1-9-released/"
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
# the major version to indicate that folks should provision in both
# directions.
# Typically, adding a dependency only requires a minor version bump, and
# removing a dependency requires a major version bump.
PROVISION_VERSION = '26.14'
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/__init__.py | # Load AppConfig app subclass by default on django applications initialization
default_app_config = 'zerver.apps.ZerverConfig'
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/apps.py |
import logging
from typing import Any, Dict
from django.apps import AppConfig
from django.conf import settings
from django.core.cache import cache
from django.db.models.signals import post_migrate
def flush_cache(sender: AppConfig, **kwargs: Any) -> None:
logging.info("Clearing memcached cache after migrations")
cache.clear()
class ZerverConfig(AppConfig):
name = "zerver" # type: str
def ready(self) -> None:
import zerver.signals
if settings.POST_MIGRATION_CACHE_FLUSHING:
post_migrate.connect(flush_cache, sender=self)
| [
"AppConfig",
"Any"
] | [
224,
245
] | [
233,
248
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/context_processors.py |
from typing import Any, Dict, List, Optional
from django.http import HttpRequest
from django.conf import settings
from zerver.models import UserProfile, get_realm, Realm
from zproject.backends import (
any_oauth_backend_enabled,
dev_auth_enabled,
github_auth_enabled,
google_auth_enabled,
password_auth_enabled,
email_auth_enabled,
require_email_format_usernames,
auth_enabled_helper,
AUTH_BACKEND_NAME_MAP
)
from zerver.lib.bugdown import convert as bugdown_convert
from zerver.lib.send_email import FromAddress
from zerver.lib.subdomains import get_subdomain
from zerver.lib.realm_icon import get_realm_icon_url
from version import ZULIP_VERSION, LATEST_RELEASE_VERSION, \
LATEST_RELEASE_ANNOUNCEMENT, LATEST_MAJOR_VERSION
def common_context(user: UserProfile) -> Dict[str, Any]:
"""Common context used for things like outgoing emails that don't
have a request.
"""
return {
'realm_uri': user.realm.uri,
'root_domain_uri': settings.ROOT_DOMAIN_URI,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'external_host': settings.EXTERNAL_HOST,
}
def get_realm_from_request(request: HttpRequest) -> Optional[Realm]:
if hasattr(request, "user") and hasattr(request.user, "realm"):
return request.user.realm
subdomain = get_subdomain(request)
return get_realm(subdomain)
def zulip_default_context(request: HttpRequest) -> Dict[str, Any]:
"""Context available to all Zulip Jinja2 templates that have a request
passed in. Designed to provide the long list of variables at the
bottom of this function in a wide range of situations: logged-in
or logged-out, subdomains or not, etc.
The main variable in the below is whether we know what realm the
user is trying to interact with.
"""
realm = get_realm_from_request(request)
if realm is None:
realm_uri = settings.ROOT_DOMAIN_URI
realm_name = None
realm_icon = None
realm_description = None
realm_invite_required = False
realm_plan_type = 0
else:
realm_uri = realm.uri
realm_name = realm.name
realm_icon = get_realm_icon_url(realm)
realm_description_raw = realm.description or "The coolest place in the universe."
realm_description = bugdown_convert(realm_description_raw, message_realm=realm)
realm_invite_required = realm.invite_required
realm_plan_type = realm.plan_type
register_link_disabled = settings.REGISTER_LINK_DISABLED
login_link_disabled = settings.LOGIN_LINK_DISABLED
find_team_link_disabled = settings.FIND_TEAM_LINK_DISABLED
allow_search_engine_indexing = False
if (settings.ROOT_DOMAIN_LANDING_PAGE
and get_subdomain(request) == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN):
register_link_disabled = True
login_link_disabled = True
find_team_link_disabled = False
allow_search_engine_indexing = True
apps_page_url = 'https://zulipchat.com/apps/'
if settings.ZILENCER_ENABLED:
apps_page_url = '/apps/'
user_is_authenticated = False
if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated'):
user_is_authenticated = request.user.is_authenticated.value
if settings.DEVELOPMENT:
secrets_path = "zproject/dev-secrets.conf"
settings_path = "zproject/dev_settings.py"
settings_comments_path = "zproject/prod_settings_template.py"
else:
secrets_path = "/etc/zulip/zulip-secrets.conf"
settings_path = "/etc/zulip/settings.py"
settings_comments_path = "/etc/zulip/settings.py"
if hasattr(request, "client") and request.client.name == "ZulipElectron":
platform = "ZulipElectron" # nocoverage
else:
platform = "ZulipWeb"
return {
'root_domain_landing_page': settings.ROOT_DOMAIN_LANDING_PAGE,
'custom_logo_url': settings.CUSTOM_LOGO_URL,
'register_link_disabled': register_link_disabled,
'login_link_disabled': login_link_disabled,
'terms_of_service': settings.TERMS_OF_SERVICE,
'privacy_policy': settings.PRIVACY_POLICY,
'login_url': settings.HOME_NOT_LOGGED_IN,
'only_sso': settings.ONLY_SSO,
'external_host': settings.EXTERNAL_HOST,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'realm_invite_required': realm_invite_required,
'realm_uri': realm_uri,
'realm_name': realm_name,
'realm_icon': realm_icon,
'realm_description': realm_description,
'realm_plan_type': realm_plan_type,
'root_domain_uri': settings.ROOT_DOMAIN_URI,
'apps_page_url': apps_page_url,
'open_realm_creation': settings.OPEN_REALM_CREATION,
'password_auth_enabled': password_auth_enabled(realm),
'dev_auth_enabled': dev_auth_enabled(realm),
'google_auth_enabled': google_auth_enabled(realm),
'github_auth_enabled': github_auth_enabled(realm),
'email_auth_enabled': email_auth_enabled(realm),
'require_email_format_usernames': require_email_format_usernames(realm),
'any_oauth_backend_enabled': any_oauth_backend_enabled(realm),
'no_auth_enabled': not auth_enabled_helper(list(AUTH_BACKEND_NAME_MAP.keys()), realm),
'development_environment': settings.DEVELOPMENT,
'support_email': FromAddress.SUPPORT,
'find_team_link_disabled': find_team_link_disabled,
'password_min_length': settings.PASSWORD_MIN_LENGTH,
'password_min_guesses': settings.PASSWORD_MIN_GUESSES,
'jitsi_server_url': settings.JITSI_SERVER_URL,
'two_factor_authentication_enabled': settings.TWO_FACTOR_AUTHENTICATION_ENABLED,
'zulip_version': ZULIP_VERSION,
'latest_release_version': LATEST_RELEASE_VERSION,
'latest_major_version': LATEST_MAJOR_VERSION,
'latest_release_announcement': LATEST_RELEASE_ANNOUNCEMENT,
'user_is_authenticated': user_is_authenticated,
'settings_path': settings_path,
'secrets_path': secrets_path,
'settings_comments_path': settings_comments_path,
'platform': platform,
'allow_search_engine_indexing': allow_search_engine_indexing,
}
| [
"UserProfile",
"HttpRequest",
"HttpRequest"
] | [
794,
1180,
1422
] | [
805,
1191,
1433
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/__init__.py | [] | [] | [] |
|
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/gitter.py | import os
import dateutil.parser
import random
import requests
import logging
import shutil
import subprocess
import ujson
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.timezone import now as timezone_now
from typing import Any, Dict, List, Set, Tuple
from zerver.models import Realm, UserProfile, Recipient
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE
from zerver.data_import.import_util import ZerverFieldsT, build_zerver_realm, \
build_avatar, build_subscription, build_recipient, build_usermessages, \
build_defaultstream, process_avatars, build_realm, build_stream, \
build_message, create_converted_data_files, make_subscriber_map
# stubs
GitterDataT = List[Dict[str, Any]]
realm_id = 0
def gitter_workspace_to_realm(domain_name: str, gitter_data: GitterDataT,
realm_subdomain: str) -> Tuple[ZerverFieldsT,
List[ZerverFieldsT],
Dict[str, int]]:
"""
Returns:
    1. realm, the converted realm data
    2. avatars, a list of avatar records for zulip's avatars/records.json
    3. user_map, a dictionary mapping gitter user ids to zulip user ids
"""
NOW = float(timezone_now().timestamp())
zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'Gitter') # type: List[ZerverFieldsT]
realm = build_realm(zerver_realm, realm_id, domain_name)
zerver_userprofile, avatars, user_map = build_userprofile(int(NOW), domain_name, gitter_data)
zerver_stream, zerver_defaultstream = build_stream_and_defaultstream(int(NOW))
zerver_recipient, zerver_subscription = build_recipient_and_subscription(
zerver_userprofile, zerver_stream)
realm['zerver_userprofile'] = zerver_userprofile
realm['zerver_stream'] = zerver_stream
realm['zerver_defaultstream'] = zerver_defaultstream
realm['zerver_recipient'] = zerver_recipient
realm['zerver_subscription'] = zerver_subscription
return realm, avatars, user_map
def build_userprofile(timestamp: Any, domain_name: str,
gitter_data: GitterDataT) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT],
Dict[str, int]]:
"""
Returns:
    1. zerver_userprofile, a list of user profiles
    2. avatar_list, a list of avatar records for zulip's avatars/records.json
    3. user_map, a dictionary mapping gitter user ids to zulip user ids
"""
logging.info('######### IMPORTING USERS STARTED #########\n')
zerver_userprofile = []
avatar_list = [] # type: List[ZerverFieldsT]
user_map = {} # type: Dict[str, int]
user_id = 0
for data in gitter_data:
if data['fromUser']['id'] not in user_map:
user_data = data['fromUser']
user_map[user_data['id']] = user_id
email = get_user_email(user_data, domain_name)
build_avatar(user_id, realm_id, email, user_data['avatarUrl'],
timestamp, avatar_list)
# Build userprofile object
userprofile = UserProfile(
full_name=user_data['displayName'],
short_name=user_data['username'],
id=user_id,
email=email,
delivery_email=email,
avatar_source='U',
pointer=-1,
date_joined=timestamp,
last_login=timestamp)
userprofile_dict = model_to_dict(userprofile)
# Set realm id separately as the corresponding realm is not yet a Realm model
# instance
userprofile_dict['realm'] = realm_id
zerver_userprofile.append(userprofile_dict)
user_id += 1
logging.info('######### IMPORTING USERS FINISHED #########\n')
return zerver_userprofile, avatar_list, user_map
def get_user_email(user_data: ZerverFieldsT, domain_name: str) -> str:
# TODO Get user email from github
email = ("%s@users.noreply.github.com" % user_data['username'])
return email
def build_stream_and_defaultstream(timestamp: Any) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT]]:
    logging.info('######### IMPORTING STREAMS STARTED #########\n')
# We have only one stream for gitter export
stream_name = 'from gitter'
stream_description = "Imported from gitter"
stream_id = 0
stream = build_stream(timestamp, realm_id, stream_name, stream_description,
stream_id)
defaultstream = build_defaultstream(realm_id=realm_id, stream_id=stream_id,
defaultstream_id=0)
logging.info('######### IMPORTING STREAMS FINISHED #########\n')
return [stream], [defaultstream]
def build_recipient_and_subscription(
zerver_userprofile: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT]]:
"""
Returns:
    1. zerver_recipient, a list of mapped recipients
    2. zerver_subscription, a list of mapped subscriptions
"""
zerver_recipient = []
zerver_subscription = []
recipient_id = subscription_id = 0
# For stream
# We have only one recipient, because we have only one stream
# Hence 'recipient_id'=0 corresponds to 'stream_id'=0
recipient = build_recipient(0, recipient_id, Recipient.STREAM)
zerver_recipient.append(recipient)
for user in zerver_userprofile:
subscription = build_subscription(recipient_id, user['id'], subscription_id)
zerver_subscription.append(subscription)
subscription_id += 1
recipient_id += 1
# For users
for user in zerver_userprofile:
recipient = build_recipient(user['id'], recipient_id, Recipient.PERSONAL)
subscription = build_subscription(recipient_id, user['id'], subscription_id)
zerver_recipient.append(recipient)
zerver_subscription.append(subscription)
recipient_id += 1
subscription_id += 1
return zerver_recipient, zerver_subscription
def convert_gitter_workspace_messages(gitter_data: GitterDataT, output_dir: str,
subscriber_map: Dict[int, Set[int]],
user_map: Dict[str, int],
user_short_name_to_full_name: Dict[str, str],
chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> None:
"""
Messages are stored in batches
"""
logging.info('######### IMPORTING MESSAGES STARTED #########\n')
message_id = 0
recipient_id = 0 # Corresponding to stream "gitter"
low_index = 0
upper_index = low_index + chunk_size
dump_file_id = 1
while True:
message_json = {}
zerver_message = []
zerver_usermessage = [] # type: List[ZerverFieldsT]
message_data = gitter_data[low_index: upper_index]
if len(message_data) == 0:
break
for message in message_data:
message_time = dateutil.parser.parse(message['sent']).timestamp()
mentioned_user_ids = get_usermentions(message, user_map,
user_short_name_to_full_name)
rendered_content = None
topic_name = 'imported from gitter'
user_id = user_map[message['fromUser']['id']]
zulip_message = build_message(topic_name, float(message_time), message_id, message['text'],
rendered_content, user_id, recipient_id)
zerver_message.append(zulip_message)
build_usermessages(
zerver_usermessage=zerver_usermessage,
subscriber_map=subscriber_map,
recipient_id=recipient_id,
mentioned_user_ids=mentioned_user_ids,
message_id=message_id,
)
message_id += 1
message_json['zerver_message'] = zerver_message
message_json['zerver_usermessage'] = zerver_usermessage
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
logging.info("Writing Messages to %s\n" % (message_filename,))
        write_data_to_file(message_filename, message_json)
low_index = upper_index
upper_index = chunk_size + low_index
dump_file_id += 1
logging.info('######### IMPORTING MESSAGES FINISHED #########\n')
def get_usermentions(message: Dict[str, Any], user_map: Dict[str, int],
user_short_name_to_full_name: Dict[str, str]) -> List[int]:
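    # A worked example (all values hypothetical): for a message whose text is
    # "hey @alice" with a mention {'userId': 'u1', 'screenName': 'alice'},
    # given user_map={'u1': 0} and user_short_name_to_full_name={'alice': 'Alice A'},
    # the text is rewritten in place to "hey @**Alice A**" and [0] is returned.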
mentioned_user_ids = []
if 'mentions' in message:
for mention in message['mentions']:
if mention.get('userId') in user_map:
gitter_mention = '@%s' % (mention['screenName'])
zulip_mention = ('@**%s**' %
(user_short_name_to_full_name[mention['screenName']]))
message['text'] = message['text'].replace(gitter_mention, zulip_mention)
mentioned_user_ids.append(user_map[mention['userId']])
return mentioned_user_ids
def do_convert_data(gitter_data_file: str, output_dir: str, threads: int=6) -> None:
# Subdomain is set by the user while running the import commands
realm_subdomain = ""
domain_name = settings.EXTERNAL_HOST
os.makedirs(output_dir, exist_ok=True)
# output directory should be empty initially
if os.listdir(output_dir):
raise Exception("Output directory should be empty!")
# Read data from the gitter file
with open(gitter_data_file, "r") as fp:
gitter_data = ujson.load(fp)
realm, avatar_list, user_map = gitter_workspace_to_realm(
domain_name, gitter_data, realm_subdomain)
subscriber_map = make_subscriber_map(
zerver_subscription=realm['zerver_subscription'],
)
# For user mentions
user_short_name_to_full_name = {}
for userprofile in realm['zerver_userprofile']:
user_short_name_to_full_name[userprofile['short_name']] = userprofile['full_name']
convert_gitter_workspace_messages(
gitter_data, output_dir, subscriber_map, user_map,
user_short_name_to_full_name)
avatar_folder = os.path.join(output_dir, 'avatars')
avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))
os.makedirs(avatar_realm_folder, exist_ok=True)
avatar_records = process_avatars(avatar_list, avatar_folder, realm_id, threads)
attachment = {"zerver_attachment": []} # type: Dict[str, List[Any]]
# IO realm.json
create_converted_data_files(realm, output_dir, '/realm.json')
# IO emoji records
create_converted_data_files([], output_dir, '/emoji/records.json')
# IO avatar records
create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')
# IO uploads records
create_converted_data_files([], output_dir, '/uploads/records.json')
# IO attachments records
create_converted_data_files(attachment, output_dir, '/attachment.json')
subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])
logging.info('######### DATA CONVERSION FINISHED #########\n')
logging.info("Zulip data dump created at %s" % (output_dir))
def write_data_to_file(output_file: str, data: Any) -> None:
with open(output_file, "w") as f:
f.write(ujson.dumps(data, indent=4))
| [
"str",
"GitterDataT",
"str",
"Any",
"str",
"GitterDataT",
"ZerverFieldsT",
"str",
"Any",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"GitterDataT",
"str",
"Dict[int, Set[int]]",
"Dict[str, int]",
"Dict[str, str]",
"Dict[str, Any]",
"Dict[str, int]",
"Dict[str, str]",
"str",
"str",
"str",
"Any"
] | [
814,
832,
892,
2148,
2166,
2206,
4073,
4101,
4284,
5041,
5081,
6394,
6419,
6478,
6547,
6631,
8778,
8804,
8871,
9481,
9498,
11617,
11628
] | [
817,
843,
895,
2151,
2169,
2217,
4086,
4104,
4287,
5060,
5100,
6405,
6422,
6497,
6561,
6645,
8792,
8818,
8885,
9484,
9501,
11620,
11631
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/hipchat.py | import base64
import dateutil
import glob
import logging
import os
import re
import shutil
import subprocess
import ujson
from typing import Any, Callable, Dict, List, Optional, Set
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.timezone import now as timezone_now
from zerver.lib.utils import (
process_list_in_batches,
)
from zerver.models import (
RealmEmoji,
Recipient,
UserProfile,
)
from zerver.data_import.import_util import (
build_message,
build_realm,
build_realm_emoji,
build_recipients,
build_stream,
build_personal_subscriptions,
build_public_stream_subscriptions,
build_private_stream_subscriptions,
build_user_message,
build_user_profile,
build_zerver_realm,
create_converted_data_files,
make_subscriber_map,
write_avatar_png,
)
from zerver.data_import.hipchat_attachment import AttachmentHandler
from zerver.data_import.hipchat_user import UserHandler
from zerver.data_import.hipchat_subscriber import SubscriberHandler
from zerver.data_import.sequencer import NEXT_ID, IdMapper
# stubs
ZerverFieldsT = Dict[str, Any]
def str_date_to_float(date_str: str) -> float:
'''
Dates look like this:
"2018-08-08T14:23:54Z 626267"
'''
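    # A worked example: the date above parses as 2018-08-08 14:23:54 UTC, and
    # the trailing integer is added as microseconds, giving that timestamp
    # plus 0.626267 seconds.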
parts = date_str.split(' ')
time_str = parts[0].replace('T', ' ')
date_time = dateutil.parser.parse(time_str)
timestamp = date_time.timestamp()
if len(parts) == 2:
microseconds = int(parts[1])
timestamp += microseconds / 1000000.0
return timestamp
def untar_input_file(tar_file: str) -> str:
data_dir = tar_file.replace('.tar', '')
data_dir = os.path.abspath(data_dir)
if os.path.exists(data_dir):
        logging.info('input data was already untarred to %s; we will use it' % (data_dir,))
return data_dir
os.makedirs(data_dir)
subprocess.check_call(['tar', '-xf', tar_file, '-C', data_dir])
logging.info('input data was untarred to %s' % (data_dir,))
return data_dir
def read_user_data(data_dir: str) -> List[ZerverFieldsT]:
fn = 'users.json'
data_file = os.path.join(data_dir, fn)
with open(data_file, "r") as fp:
return ujson.load(fp)
def convert_user_data(user_handler: UserHandler,
user_id_mapper: IdMapper,
raw_data: List[ZerverFieldsT],
realm_id: int) -> None:
flat_data = [
d['User']
for d in raw_data
]
def process(in_dict: ZerverFieldsT) -> ZerverFieldsT:
delivery_email = in_dict['email']
email = in_dict['email']
full_name = in_dict['name']
id = user_id_mapper.get(in_dict['id'])
is_realm_admin = in_dict['account_type'] == 'admin'
is_guest = in_dict['account_type'] == 'guest'
is_mirror_dummy = False
short_name = in_dict['mention_name']
timezone = in_dict['timezone']
date_joined = int(timezone_now().timestamp())
is_active = not in_dict['is_deleted']
if not email:
if is_guest:
# Hipchat guest users don't have emails, so
# we just fake them.
email = 'guest-{id}@example.com'.format(id=id)
delivery_email = email
else:
# Hipchat sometimes doesn't export an email for deactivated users.
assert not is_active
email = delivery_email = "deactivated-{id}@example.com".format(id=id)
# unmapped fields:
# title - Developer, Project Manager, etc.
# rooms - no good sample data
# created - we just use "now"
# roles - we just use account_type
if in_dict.get('avatar'):
avatar_source = 'U'
else:
avatar_source = 'G'
return build_user_profile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=id,
is_active=is_active,
is_realm_admin=is_realm_admin,
is_guest=is_guest,
is_mirror_dummy=is_mirror_dummy,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
)
for raw_item in flat_data:
user = process(raw_item)
user_handler.add_user(user)
def convert_avatar_data(avatar_folder: str,
raw_data: List[ZerverFieldsT],
realm_id: int) -> List[ZerverFieldsT]:
'''
This code is pretty specific to how Hipchat sends us data.
They give us the avatar payloads in base64 in users.json.
We process avatars in our own pass of that data, rather
than doing it while we're getting other user data. I
chose to keep this separate, as otherwise you have a lot
of extraneous data getting passed around.
This code has MAJOR SIDE EFFECTS--namely writing a bunch
of files to the avatars directory.
'''
flat_data = [
d['User']
for d in raw_data
if d.get('avatar')
]
def process(raw_user: ZerverFieldsT) -> ZerverFieldsT:
avatar_payload = raw_user['avatar']
bits = base64.b64decode(avatar_payload)
user_id = raw_user['id']
metadata = write_avatar_png(
avatar_folder=avatar_folder,
realm_id=realm_id,
user_id=user_id,
bits=bits,
)
return metadata
avatar_records = list(map(process, flat_data))
return avatar_records
def read_room_data(data_dir: str) -> List[ZerverFieldsT]:
fn = 'rooms.json'
data_file = os.path.join(data_dir, fn)
with open(data_file) as f:
data = ujson.load(f)
return data
def convert_room_data(raw_data: List[ZerverFieldsT],
subscriber_handler: SubscriberHandler,
stream_id_mapper: IdMapper,
user_id_mapper: IdMapper,
realm_id: int) -> List[ZerverFieldsT]:
flat_data = [
d['Room']
for d in raw_data
]
def invite_only(v: str) -> bool:
if v == 'public':
return False
elif v == 'private':
return True
else:
raise Exception('unexpected value')
def process(in_dict: ZerverFieldsT) -> ZerverFieldsT:
now = int(timezone_now().timestamp())
stream_id = stream_id_mapper.get(in_dict['id'])
out_dict = build_stream(
date_created=now,
realm_id=realm_id,
name=in_dict['name'],
description=in_dict['topic'],
stream_id=stream_id,
deactivated=in_dict['is_archived'],
invite_only=invite_only(in_dict['privacy']),
)
if not user_id_mapper.has(in_dict['owner']):
raise Exception('bad owner')
owner = user_id_mapper.get(in_dict['owner'])
members = {
user_id_mapper.get(key)
for key in in_dict['members']
if user_id_mapper.has(key)
}
subscriber_handler.set_info(
stream_id=stream_id,
owner=owner,
members=members,
)
# unmapped fields:
# guest_access_url: no Zulip equivalent
# created: we just use "now"
# members: no good sample data
# owners: no good sample data
# participants: no good sample data
return out_dict
return list(map(process, flat_data))
def make_realm(realm_id: int) -> ZerverFieldsT:
NOW = float(timezone_now().timestamp())
domain_name = settings.EXTERNAL_HOST
realm_subdomain = ""
zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'HipChat')
realm = build_realm(zerver_realm, realm_id, domain_name)
# We may override these later.
realm['zerver_defaultstream'] = []
return realm
def write_avatar_data(raw_user_data: List[ZerverFieldsT],
output_dir: str,
realm_id: int) -> None:
avatar_folder = os.path.join(output_dir, 'avatars')
avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))
os.makedirs(avatar_realm_folder, exist_ok=True)
avatar_records = convert_avatar_data(
avatar_folder=avatar_folder,
raw_data=raw_user_data,
realm_id=realm_id,
)
create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')
def write_emoticon_data(realm_id: int,
data_dir: str,
output_dir: str) -> List[ZerverFieldsT]:
'''
This function does most of the work for processing emoticons, the bulk
of which is copying files. We also write a json file with metadata.
Finally, we return a list of RealmEmoji dicts to our caller.
In our data_dir we have a pretty simple setup:
emoticons.json - has very simple metadata on emojis:
{
"Emoticon": {
"id": 9875487,
"path": "emoticons/yasss.jpg",
"shortcut": "yasss"
}
},
{
"Emoticon": {
"id": 718017,
"path": "emoticons/yayyyyy.gif",
"shortcut": "yayyyyy"
}
}
emoticons/ - contains a bunch of image files:
slytherinsnake.gif
spanishinquisition.jpg
sparkle.png
spiderman.gif
stableparrot.gif
stalkerparrot.gif
supergirl.png
superman.png
We move all the relevant files to Zulip's more nested
directory structure.
'''
logging.info('Starting to process emoticons')
fn = 'emoticons.json'
data_file = os.path.join(data_dir, fn)
with open(data_file) as f:
data = ujson.load(f)
flat_data = [
dict(
path=d['Emoticon']['path'],
name=d['Emoticon']['shortcut'],
)
for d in data
]
emoji_folder = os.path.join(output_dir, 'emoji')
os.makedirs(emoji_folder, exist_ok=True)
def process(data: ZerverFieldsT) -> ZerverFieldsT:
source_sub_path = data['path']
source_fn = os.path.basename(source_sub_path)
source_path = os.path.join(data_dir, source_sub_path)
# Use our template from RealmEmoji
# PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
target_fn = source_fn
target_sub_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=realm_id,
emoji_file_name=target_fn,
)
target_path = os.path.join(emoji_folder, target_sub_path)
os.makedirs(os.path.dirname(target_path), exist_ok=True)
source_path = os.path.abspath(source_path)
target_path = os.path.abspath(target_path)
shutil.copyfile(source_path, target_path)
return dict(
path=target_path,
s3_path=target_path,
file_name=target_fn,
realm_id=realm_id,
name=data['name'],
)
emoji_records = list(map(process, flat_data))
create_converted_data_files(emoji_records, output_dir, '/emoji/records.json')
realmemoji = [
build_realm_emoji(
realm_id=realm_id,
name=rec['name'],
id=NEXT_ID('realmemoji'),
file_name=rec['file_name'],
)
for rec in emoji_records
]
logging.info('Done processing emoticons')
return realmemoji
def write_message_data(realm_id: int,
message_key: str,
zerver_recipient: List[ZerverFieldsT],
subscriber_map: Dict[int, Set[int]],
data_dir: str,
output_dir: str,
masking_content: bool,
stream_id_mapper: IdMapper,
user_id_mapper: IdMapper,
user_handler: UserHandler,
attachment_handler: AttachmentHandler) -> None:
stream_id_to_recipient_id = {
d['type_id']: d['id']
for d in zerver_recipient
if d['type'] == Recipient.STREAM
}
user_id_to_recipient_id = {
d['type_id']: d['id']
for d in zerver_recipient
if d['type'] == Recipient.PERSONAL
}
def get_stream_recipient_id(raw_message: ZerverFieldsT) -> int:
fn_id = raw_message['fn_id']
stream_id = stream_id_mapper.get(fn_id)
recipient_id = stream_id_to_recipient_id[stream_id]
return recipient_id
def get_pm_recipient_id(raw_message: ZerverFieldsT) -> int:
raw_user_id = raw_message['receiver_id']
assert(raw_user_id)
user_id = user_id_mapper.get(raw_user_id)
recipient_id = user_id_to_recipient_id[user_id]
return recipient_id
if message_key in ['UserMessage', 'NotificationMessage']:
is_pm_data = False
dir_glob = os.path.join(data_dir, 'rooms', '*', 'history.json')
get_recipient_id = get_stream_recipient_id
get_files_dir = lambda fn_id: os.path.join(data_dir, 'rooms', str(fn_id), 'files')
elif message_key == 'PrivateUserMessage':
is_pm_data = True
dir_glob = os.path.join(data_dir, 'users', '*', 'history.json')
get_recipient_id = get_pm_recipient_id
get_files_dir = lambda fn_id: os.path.join(data_dir, 'users', 'files')
else:
raise Exception('programming error: invalid message_key: ' + message_key)
history_files = glob.glob(dir_glob)
for fn in history_files:
dir = os.path.dirname(fn)
fn_id = os.path.basename(dir)
files_dir = get_files_dir(fn_id)
process_message_file(
realm_id=realm_id,
fn=fn,
fn_id=fn_id,
files_dir=files_dir,
get_recipient_id=get_recipient_id,
message_key=message_key,
subscriber_map=subscriber_map,
data_dir=data_dir,
output_dir=output_dir,
is_pm_data=is_pm_data,
masking_content=masking_content,
user_id_mapper=user_id_mapper,
user_handler=user_handler,
attachment_handler=attachment_handler,
)
def get_hipchat_sender_id(realm_id: int,
message_dict: Dict[str, Any],
user_id_mapper: IdMapper,
user_handler: UserHandler) -> int:
'''
The HipChat export is inconsistent in how it renders
senders, and sometimes we don't even get an id.
'''
if isinstance(message_dict['sender'], str):
# Some Hipchat instances just give us a person's
# name in the sender field for NotificationMessage.
# We turn them into a mirror user.
mirror_user = user_handler.get_mirror_user(
realm_id=realm_id,
name=message_dict['sender'],
)
sender_id = mirror_user['id']
return sender_id
raw_sender_id = message_dict['sender']['id']
if raw_sender_id == 0:
mirror_user = user_handler.get_mirror_user(
realm_id=realm_id,
name=message_dict['sender']['name']
)
sender_id = mirror_user['id']
return sender_id
if not user_id_mapper.has(raw_sender_id):
mirror_user = user_handler.get_mirror_user(
realm_id=realm_id,
name=message_dict['sender']['id']
)
sender_id = mirror_user['id']
return sender_id
# HAPPY PATH: Hipchat just gave us an ordinary
# sender_id.
sender_id = user_id_mapper.get(raw_sender_id)
return sender_id
def process_message_file(realm_id: int,
fn: str,
fn_id: str,
files_dir: str,
get_recipient_id: Callable[[ZerverFieldsT], int],
message_key: str,
subscriber_map: Dict[int, Set[int]],
data_dir: str,
output_dir: str,
is_pm_data: bool,
masking_content: bool,
user_id_mapper: IdMapper,
user_handler: UserHandler,
attachment_handler: AttachmentHandler) -> None:
def get_raw_messages(fn: str) -> List[ZerverFieldsT]:
with open(fn) as f:
data = ujson.load(f)
flat_data = [
d[message_key]
for d in data
if message_key in d
]
def get_raw_message(d: Dict[str, Any]) -> Optional[ZerverFieldsT]:
sender_id = get_hipchat_sender_id(
realm_id=realm_id,
message_dict=d,
user_id_mapper=user_id_mapper,
user_handler=user_handler,
)
if is_pm_data:
if sender_id != fn_id:
# PMs are in multiple places in the Hipchat export,
# and we only use the copy from the sender
return None
content = d['message']
if masking_content:
content = re.sub('[a-z]', 'x', content)
content = re.sub('[A-Z]', 'X', content)
return dict(
fn_id=fn_id,
sender_id=sender_id,
receiver_id=d.get('receiver', {}).get('id'),
content=content,
mention_user_ids=d.get('mentions', []),
pub_date=str_date_to_float(d['timestamp']),
attachment=d.get('attachment'),
files_dir=files_dir,
)
raw_messages = []
for d in flat_data:
raw_message = get_raw_message(d)
if raw_message is not None:
raw_messages.append(raw_message)
return raw_messages
raw_messages = get_raw_messages(fn)
def process_batch(lst: List[Any]) -> None:
process_raw_message_batch(
realm_id=realm_id,
raw_messages=lst,
subscriber_map=subscriber_map,
user_id_mapper=user_id_mapper,
user_handler=user_handler,
attachment_handler=attachment_handler,
get_recipient_id=get_recipient_id,
is_pm_data=is_pm_data,
output_dir=output_dir,
)
chunk_size = 1000
process_list_in_batches(
lst=raw_messages,
chunk_size=chunk_size,
process_batch=process_batch,
)
def process_raw_message_batch(realm_id: int,
raw_messages: List[Dict[str, Any]],
subscriber_map: Dict[int, Set[int]],
user_id_mapper: IdMapper,
user_handler: UserHandler,
attachment_handler: AttachmentHandler,
get_recipient_id: Callable[[ZerverFieldsT], int],
is_pm_data: bool,
output_dir: str) -> None:
def fix_mentions(content: str,
mention_user_ids: Set[int]) -> str:
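        # e.g. (hypothetical user): if user 7 has short_name 'alice' and
        # full_name 'Alice A', then '@alice ping @here' becomes
        # '@**Alice A** ping @**all**'.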
for user_id in mention_user_ids:
user = user_handler.get_user(user_id=user_id)
hipchat_mention = '@{short_name}'.format(**user)
zulip_mention = '@**{full_name}**'.format(**user)
content = content.replace(hipchat_mention, zulip_mention)
content = content.replace('@here', '@**all**')
return content
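    # Illustrative example (hypothetical names): for a mentioned user with
    # short_name 'alice' and full_name 'Alice Smith', fix_mentions turns
    # '@alice see @here' into '@**Alice Smith** see @**all**'.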
mention_map = dict() # type: Dict[int, Set[int]]
def make_message(message_id: int, raw_message: ZerverFieldsT) -> ZerverFieldsT:
        # One side effect here: we populate mention_map for this message.
mention_user_ids = {
user_id_mapper.get(id)
for id in set(raw_message['mention_user_ids'])
if user_id_mapper.has(id)
}
mention_map[message_id] = mention_user_ids
content = fix_mentions(
content=raw_message['content'],
mention_user_ids=mention_user_ids,
)
pub_date = raw_message['pub_date']
recipient_id = get_recipient_id(raw_message)
rendered_content = None
if is_pm_data:
topic_name = ''
else:
topic_name = 'imported from hipchat'
user_id = raw_message['sender_id']
        # Another side effect: the attachment handler records upload metadata.
extra_content = attachment_handler.handle_message_data(
realm_id=realm_id,
message_id=message_id,
sender_id=user_id,
attachment=raw_message['attachment'],
files_dir=raw_message['files_dir'],
)
if extra_content:
has_attachment = True
content += '\n' + extra_content
else:
has_attachment = False
return build_message(
content=content,
message_id=message_id,
pub_date=pub_date,
recipient_id=recipient_id,
rendered_content=rendered_content,
topic_name=topic_name,
user_id=user_id,
has_attachment=has_attachment,
)
zerver_message = [
make_message(
message_id=NEXT_ID('message'),
raw_message=raw_message
)
for raw_message in raw_messages
]
zerver_usermessage = make_user_messages(
zerver_message=zerver_message,
subscriber_map=subscriber_map,
is_pm_data=is_pm_data,
mention_map=mention_map,
)
message_json = dict(
zerver_message=zerver_message,
zerver_usermessage=zerver_usermessage,
)
dump_file_id = NEXT_ID('dump_file_id')
message_file = "/messages-%06d.json" % (dump_file_id,)
create_converted_data_files(message_json, output_dir, message_file)
def make_user_messages(zerver_message: List[ZerverFieldsT],
subscriber_map: Dict[int, Set[int]],
is_pm_data: bool,
mention_map: Dict[int, Set[int]]) -> List[ZerverFieldsT]:
zerver_usermessage = []
for message in zerver_message:
message_id = message['id']
recipient_id = message['recipient']
sender_id = message['sender']
mention_user_ids = mention_map[message_id]
subscriber_ids = subscriber_map.get(recipient_id, set())
user_ids = subscriber_ids | {sender_id}
for user_id in user_ids:
is_mentioned = user_id in mention_user_ids
user_message = build_user_message(
user_id=user_id,
message_id=message_id,
is_private=is_pm_data,
is_mentioned=is_mentioned,
)
zerver_usermessage.append(user_message)
return zerver_usermessage
def do_convert_data(input_tar_file: str,
output_dir: str,
masking_content: bool) -> None:
input_data_dir = untar_input_file(input_tar_file)
attachment_handler = AttachmentHandler()
user_handler = UserHandler()
subscriber_handler = SubscriberHandler()
user_id_mapper = IdMapper()
stream_id_mapper = IdMapper()
realm_id = 0
realm = make_realm(realm_id=realm_id)
# users.json -> UserProfile
raw_user_data = read_user_data(data_dir=input_data_dir)
convert_user_data(
user_handler=user_handler,
user_id_mapper=user_id_mapper,
raw_data=raw_user_data,
realm_id=realm_id,
)
normal_users = user_handler.get_normal_users()
# Don't write zerver_userprofile here, because we
# may add more users later.
# streams.json -> Stream
raw_stream_data = read_room_data(data_dir=input_data_dir)
zerver_stream = convert_room_data(
raw_data=raw_stream_data,
subscriber_handler=subscriber_handler,
stream_id_mapper=stream_id_mapper,
user_id_mapper=user_id_mapper,
realm_id=realm_id,
)
realm['zerver_stream'] = zerver_stream
zerver_recipient = build_recipients(
zerver_userprofile=normal_users,
zerver_stream=zerver_stream,
)
realm['zerver_recipient'] = zerver_recipient
public_stream_subscriptions = build_public_stream_subscriptions(
zerver_userprofile=normal_users,
zerver_recipient=zerver_recipient,
zerver_stream=zerver_stream,
)
private_stream_subscriptions = build_private_stream_subscriptions(
get_users=subscriber_handler.get_users,
zerver_recipient=zerver_recipient,
zerver_stream=zerver_stream,
)
personal_subscriptions = build_personal_subscriptions(
zerver_recipient=zerver_recipient,
)
zerver_subscription = \
public_stream_subscriptions + \
personal_subscriptions + \
private_stream_subscriptions
realm['zerver_subscription'] = zerver_subscription
zerver_realmemoji = write_emoticon_data(
realm_id=realm_id,
data_dir=input_data_dir,
output_dir=output_dir,
)
realm['zerver_realmemoji'] = zerver_realmemoji
subscriber_map = make_subscriber_map(
zerver_subscription=zerver_subscription,
)
logging.info('Start importing message data')
for message_key in ['UserMessage',
'NotificationMessage',
'PrivateUserMessage']:
write_message_data(
realm_id=realm_id,
message_key=message_key,
zerver_recipient=zerver_recipient,
subscriber_map=subscriber_map,
data_dir=input_data_dir,
output_dir=output_dir,
masking_content=masking_content,
stream_id_mapper=stream_id_mapper,
user_id_mapper=user_id_mapper,
user_handler=user_handler,
attachment_handler=attachment_handler,
)
# Order is important here...don't write users until
# we process everything else, since we may introduce
# mirror users when processing messages.
realm['zerver_userprofile'] = user_handler.get_all_users()
realm['sort_by_date'] = True
create_converted_data_files(realm, output_dir, '/realm.json')
logging.info('Start importing avatar data')
write_avatar_data(
raw_user_data=raw_user_data,
output_dir=output_dir,
realm_id=realm_id,
)
attachment_handler.write_info(
output_dir=output_dir,
realm_id=realm_id,
)
logging.info('Start making tarball')
subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])
logging.info('Done making tarball')
| [
"str",
"str",
"str",
"UserHandler",
"IdMapper",
"List[ZerverFieldsT]",
"int",
"ZerverFieldsT",
"str",
"List[ZerverFieldsT]",
"int",
"ZerverFieldsT",
"str",
"List[ZerverFieldsT]",
"SubscriberHandler",
"IdMapper",
"IdMapper",
"int",
"str",
"ZerverFieldsT",
"int",
"List[ZerverFieldsT]",
"str",
"int",
"int",
"str",
"str",
"ZerverFieldsT",
"int",
"str",
"List[ZerverFieldsT]",
"Dict[int, Set[int]]",
"str",
"str",
"bool",
"IdMapper",
"IdMapper",
"UserHandler",
"AttachmentHandler",
"ZerverFieldsT",
"ZerverFieldsT",
"int",
"Dict[str, Any]",
"IdMapper",
"UserHandler",
"int",
"str",
"str",
"str",
"Callable[[ZerverFieldsT], int]",
"str",
"Dict[int, Set[int]]",
"str",
"str",
"bool",
"bool",
"IdMapper",
"UserHandler",
"AttachmentHandler",
"str",
"Dict[str, Any]",
"List[Any]",
"int",
"List[Dict[str, Any]]",
"Dict[int, Set[int]]",
"IdMapper",
"UserHandler",
"AttachmentHandler",
"Callable[[ZerverFieldsT], int]",
"bool",
"str",
"str",
"Set[int]",
"int",
"ZerverFieldsT",
"List[ZerverFieldsT]",
"Dict[int, Set[int]]",
"bool",
"Dict[int, Set[int]]",
"str",
"str",
"bool"
] | [
1196,
1617,
2077,
2275,
2326,
2368,
2421,
2529,
4479,
4518,
4573,
5193,
5655,
5858,
5921,
5980,
6028,
6070,
6191,
6397,
7623,
8029,
8084,
8121,
8578,
8617,
8658,
10203,
11627,
11668,
11714,
11774,
11828,
11868,
11913,
11960,
12009,
12056,
12112,
12478,
12716,
14393,
14438,
14496,
14546,
15804,
15838,
15875,
15916,
15964,
16034,
16080,
16136,
16178,
16220,
16268,
16315,
16364,
16422,
16480,
16720,
18086,
18699,
18748,
18816,
18883,
18937,
19000,
19067,
19141,
19189,
19234,
19278,
19756,
19774,
21973,
22033,
22089,
22131,
22946,
22983,
23025
] | [
1199,
1620,
2080,
2286,
2334,
2387,
2424,
2542,
4482,
4537,
4576,
5206,
5658,
5877,
5938,
5988,
6036,
6073,
6194,
6410,
7626,
8048,
8087,
8124,
8581,
8620,
8661,
10216,
11630,
11671,
11733,
11793,
11831,
11871,
11917,
11968,
12017,
12067,
12129,
12491,
12729,
14396,
14452,
14504,
14557,
15807,
15841,
15878,
15919,
15994,
16037,
16099,
16139,
16181,
16224,
16272,
16323,
16375,
16439,
16483,
16734,
18095,
18702,
18768,
18835,
18891,
18948,
19017,
19097,
19145,
19192,
19237,
19286,
19759,
19787,
21992,
22052,
22093,
22150,
22949,
22986,
23029
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/hipchat_attachment.py | import logging
import shutil
import os
from zerver.data_import.import_util import (
build_attachment,
create_converted_data_files,
)
from typing import Any, Dict, List, Optional
class AttachmentHandler:
def __init__(self) -> None:
self.info_dict = dict() # type: Dict[str, Dict[str, Any]]
def handle_message_data(self,
realm_id: int,
message_id: int,
sender_id: int,
attachment: Dict[str, Any],
files_dir: str) -> Optional[str]:
if not attachment:
return None
name = attachment['name']
if 'path' not in attachment:
logging.info('Skipping HipChat attachment with missing path data: ' + name)
return None
size = attachment['size']
path = attachment['path']
local_fn = os.path.join(files_dir, path)
target_path = os.path.join(
str(realm_id),
'HipChatImportAttachment',
path
)
if target_path in self.info_dict:
logging.info("file used multiple times: " + path)
info = self.info_dict[target_path]
info['message_ids'].add(message_id)
return info['content']
# HipChat provides size info, but it's not
        # completely trustworthy, so we just
# ask the OS for file details.
size = os.path.getsize(local_fn)
mtime = os.path.getmtime(local_fn)
content = '[{name}](/user_uploads/{path})'.format(
name=name,
path=target_path,
)
info = dict(
message_ids={message_id},
sender_id=sender_id,
local_fn=local_fn,
target_path=target_path,
name=name,
size=size,
mtime=mtime,
content=content,
)
self.info_dict[target_path] = info
return content
def write_info(self, output_dir: str, realm_id: int) -> None:
attachments = [] # type: List[Dict[str, Any]]
uploads_records = [] # type: List[Dict[str, Any]]
def add_attachment(info: Dict[str, Any]) -> None:
build_attachment(
realm_id=realm_id,
message_ids=info['message_ids'],
user_id=info['sender_id'],
fileinfo=dict(
created=info['mtime'], # minor lie
size=info['size'],
name=info['name'],
),
s3_path=info['target_path'],
zerver_attachment=attachments,
)
def add_upload(info: Dict[str, Any]) -> None:
target_path = info['target_path']
upload_rec = dict(
size=info['size'],
user_profile_id=info['sender_id'],
realm_id=realm_id,
s3_path=target_path,
path=target_path,
content_type=None,
)
uploads_records.append(upload_rec)
def make_full_target_path(info: Dict[str, Any]) -> str:
target_path = info['target_path']
full_target_path = os.path.join(
output_dir,
'uploads',
target_path,
)
full_target_path = os.path.abspath(full_target_path)
os.makedirs(os.path.dirname(full_target_path), exist_ok=True)
return full_target_path
def copy_file(info: Dict[str, Any]) -> None:
source_path = info['local_fn']
target_path = make_full_target_path(info)
shutil.copyfile(source_path, target_path)
logging.info('Start processing attachment files')
for info in self.info_dict.values():
add_attachment(info)
add_upload(info)
copy_file(info)
uploads_folder = os.path.join(output_dir, 'uploads')
os.makedirs(os.path.join(uploads_folder, str(realm_id)), exist_ok=True)
attachment = dict(
zerver_attachment=attachments
)
create_converted_data_files(uploads_records, output_dir, '/uploads/records.json')
create_converted_data_files(attachment, output_dir, '/attachment.json')
logging.info('Done processing attachment files')
| [
"int",
"int",
"int",
"Dict[str, Any]",
"str",
"str",
"int",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]"
] | [
386,
431,
475,
520,
575,
2040,
2055,
2217,
2719,
3150,
3567
] | [
389,
434,
478,
534,
578,
2043,
2058,
2231,
2733,
3164,
3581
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/hipchat_subscriber.py | from typing import Any, Dict, Set
class SubscriberHandler:
'''
A note on ids here: we borrow Hipchat ids as Zulip
ids during the conversion phase. (They get re-mapped
    during import, but that doesn't concern us here.)
So these are all synonymous:
HipChat room_id == Zulip stream_id
member ids = hipchat user ids = Zulip user_id
owner id = hipchat user id = Zulip user_id
In this class, when it's somewhat arbitrary whether
to call something a "room" or a "stream", we use
the Zulip naming.
'''
def __init__(self) -> None:
self.stream_info = dict() # type: Dict[int, Dict[str, Any]]
def set_info(self,
stream_id: int,
owner: int,
members: Set[int]) -> None:
# Our callers are basically giving us
# data straight out of rooms.json.
self.stream_info[stream_id] = dict(
owner=owner,
members=members,
)
def get_users(self,
stream_id: int) -> Set[int]:
info = self.stream_info[stream_id]
users = info['members'] | {info['owner']}
return users
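    # Illustrative usage sketch:
    #   handler = SubscriberHandler()
    #   handler.set_info(stream_id=5, owner=1, members={2, 3})
    #   handler.get_users(stream_id=5)  # -> {1, 2, 3}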
| [
"int",
"int",
"Set[int]",
"int"
] | [
712,
741,
772,
1042
] | [
715,
744,
780,
1045
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/hipchat_user.py | from typing import Any, Dict, List
from django.utils.timezone import now as timezone_now
from zerver.data_import.import_util import (
build_user_profile,
)
class UserHandler:
'''
Our UserHandler class is a glorified wrapper
around the data that eventually goes into
zerver_userprofile.
The class helps us do things like map ids
to names for mentions.
We also sometimes need to build mirror
users on the fly.
'''
def __init__(self) -> None:
self.id_to_user_map = dict() # type: Dict[int, Dict[str, Any]]
self.name_to_mirror_user_map = dict() # type: Dict[str, Dict[str, Any]]
self.mirror_user_id = 1
def add_user(self, user: Dict[str, Any]) -> None:
user_id = user['id']
self.id_to_user_map[user_id] = user
def get_user(self, user_id: int) -> Dict[str, Any]:
user = self.id_to_user_map[user_id]
return user
def get_mirror_user(self,
realm_id: int,
name: str) -> Dict[str, Any]:
if name in self.name_to_mirror_user_map:
user = self.name_to_mirror_user_map[name]
return user
user_id = self._new_mirror_user_id()
short_name = name
full_name = name
email = 'mirror-{user_id}@example.com'.format(user_id=user_id)
delivery_email = email
avatar_source = 'G'
date_joined = int(timezone_now().timestamp())
timezone = 'UTC'
user = build_user_profile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=user_id,
is_active=False,
is_realm_admin=False,
is_guest=False,
is_mirror_dummy=True,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
)
self.name_to_mirror_user_map[name] = user
return user
def _new_mirror_user_id(self) -> int:
next_id = self.mirror_user_id
while next_id in self.id_to_user_map:
next_id += 1
self.mirror_user_id = next_id + 1
return next_id
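    # Illustrative behavior: if real users already occupy ids {1, 2}, successive
    # calls return 3, then 4, skipping any id present in id_to_user_map.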
def get_normal_users(self) -> List[Dict[str, Any]]:
users = list(self.id_to_user_map.values())
return users
def get_all_users(self) -> List[Dict[str, Any]]:
normal_users = self.get_normal_users()
mirror_users = list(self.name_to_mirror_user_map.values())
all_users = normal_users + mirror_users
return all_users
| [
"Dict[str, Any]",
"int",
"int",
"str"
] | [
704,
835,
988,
1023
] | [
718,
838,
991,
1026
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/import_util.py | import random
import requests
import shutil
import logging
import os
import ujson
from typing import List, Dict, Any, Optional, Set, Callable
from django.forms.models import model_to_dict
from zerver.models import Realm, RealmEmoji, Subscription, Recipient, \
Attachment, Stream, Message, UserProfile
from zerver.data_import.sequencer import NEXT_ID
from zerver.lib.actions import STREAM_ASSIGNMENT_COLORS as stream_colors
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.parallel import run_parallel
# stubs
ZerverFieldsT = Dict[str, Any]
def build_zerver_realm(realm_id: int, realm_subdomain: str, time: float,
other_product: str) -> List[ZerverFieldsT]:
realm = Realm(id=realm_id, date_created=time,
name=realm_subdomain, string_id=realm_subdomain,
description=("Organization imported from %s!" % (other_product)))
auth_methods = [[flag[0], flag[1]] for flag in realm.authentication_methods]
realm_dict = model_to_dict(realm, exclude='authentication_methods')
realm_dict['authentication_methods'] = auth_methods
    return [realm_dict]
def build_user_profile(avatar_source: str,
date_joined: Any,
delivery_email: str,
email: str,
full_name: str,
id: int,
is_active: bool,
is_realm_admin: bool,
is_guest: bool,
is_mirror_dummy: bool,
realm_id: int,
short_name: str,
timezone: Optional[str]) -> ZerverFieldsT:
pointer = -1
obj = UserProfile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=id,
is_active=is_active,
is_realm_admin=is_realm_admin,
is_guest=is_guest,
pointer=pointer,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
)
dct = model_to_dict(obj)
return dct
def build_avatar(zulip_user_id: int, realm_id: int, email: str, avatar_url: str,
timestamp: Any, avatar_list: List[ZerverFieldsT]) -> None:
avatar = dict(
path=avatar_url, # Save original avatar url here, which is downloaded later
realm_id=realm_id,
content_type=None,
user_profile_id=zulip_user_id,
last_modified=timestamp,
user_profile_email=email,
s3_path="",
size="")
avatar_list.append(avatar)
def make_subscriber_map(zerver_subscription: List[ZerverFieldsT]) -> Dict[int, Set[int]]:
'''
This can be convenient for building up UserMessage
rows.
'''
subscriber_map = dict() # type: Dict[int, Set[int]]
for sub in zerver_subscription:
user_id = sub['user_profile']
recipient_id = sub['recipient']
if recipient_id not in subscriber_map:
subscriber_map[recipient_id] = set()
subscriber_map[recipient_id].add(user_id)
return subscriber_map
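# Illustrative shape: given zerver_subscription rows
#   [{'user_profile': 7, 'recipient': 3}, {'user_profile': 9, 'recipient': 3}]
# make_subscriber_map returns {3: {7, 9}}.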
def build_subscription(recipient_id: int, user_id: int,
subscription_id: int) -> ZerverFieldsT:
subscription = Subscription(
color=random.choice(stream_colors),
id=subscription_id)
subscription_dict = model_to_dict(subscription, exclude=['user_profile', 'recipient_id'])
subscription_dict['user_profile'] = user_id
subscription_dict['recipient'] = recipient_id
return subscription_dict
def build_public_stream_subscriptions(
zerver_userprofile: List[ZerverFieldsT],
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
'''
This function is only used for Hipchat now, but it may apply to
future conversions. We often don't get full subscriber data in
the Hipchat export, so this function just autosubscribes all
users to every public stream. This returns a list of Subscription
dicts.
'''
subscriptions = [] # type: List[ZerverFieldsT]
public_stream_ids = {
stream['id']
for stream in zerver_stream
if not stream['invite_only']
}
public_stream_recipient_ids = {
recipient['id']
for recipient in zerver_recipient
if recipient['type'] == Recipient.STREAM
and recipient['type_id'] in public_stream_ids
}
user_ids = [
user['id']
for user in zerver_userprofile
]
for recipient_id in public_stream_recipient_ids:
for user_id in user_ids:
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_private_stream_subscriptions(
get_users: Callable[..., Set[int]],
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions = [] # type: List[ZerverFieldsT]
stream_ids = {
stream['id']
for stream in zerver_stream
if stream['invite_only']
}
recipient_map = {
recipient['id']: recipient['type_id'] # recipient_id -> stream_id
for recipient in zerver_recipient
if recipient['type'] == Recipient.STREAM
and recipient['type_id'] in stream_ids
}
for recipient_id, stream_id in recipient_map.items():
user_ids = get_users(stream_id=stream_id)
for user_id in user_ids:
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_personal_subscriptions(zerver_recipient: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions = [] # type: List[ZerverFieldsT]
personal_recipients = [
recipient
for recipient in zerver_recipient
if recipient['type'] == Recipient.PERSONAL
]
for recipient in personal_recipients:
recipient_id = recipient['id']
user_id = recipient['type_id']
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_recipient(type_id: int, recipient_id: int, type: int) -> ZerverFieldsT:
recipient = Recipient(
type_id=type_id, # stream id
id=recipient_id,
type=type)
recipient_dict = model_to_dict(recipient)
return recipient_dict
def build_recipients(zerver_userprofile: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
'''
As of this writing, we only use this in the HipChat
conversion. The Slack and Gitter conversions do it more
tightly integrated with creating other objects.
'''
recipients = []
for user in zerver_userprofile:
type_id = user['id']
type = Recipient.PERSONAL
recipient = Recipient(
type_id=type_id,
id=NEXT_ID('recipient'),
type=type,
)
recipient_dict = model_to_dict(recipient)
recipients.append(recipient_dict)
for stream in zerver_stream:
type_id = stream['id']
type = Recipient.STREAM
recipient = Recipient(
type_id=type_id,
id=NEXT_ID('recipient'),
type=type,
)
recipient_dict = model_to_dict(recipient)
recipients.append(recipient_dict)
return recipients
def build_realm(zerver_realm: List[ZerverFieldsT], realm_id: int,
domain_name: str) -> ZerverFieldsT:
realm = dict(zerver_client=[{"name": "populate_db", "id": 1},
{"name": "website", "id": 2},
{"name": "API", "id": 3}],
zerver_customprofilefield=[],
zerver_customprofilefieldvalue=[],
zerver_userpresence=[], # shows last logged in data, which is not available
zerver_userprofile_mirrordummy=[],
zerver_realmdomain=[{"realm": realm_id,
"allow_subdomains": False,
"domain": domain_name,
"id": realm_id}],
zerver_useractivity=[],
zerver_realm=zerver_realm,
zerver_huddle=[],
zerver_userprofile_crossrealm=[],
zerver_useractivityinterval=[],
zerver_reaction=[],
zerver_realmemoji=[],
zerver_realmfilter=[])
return realm
def build_usermessages(zerver_usermessage: List[ZerverFieldsT],
subscriber_map: Dict[int, Set[int]],
recipient_id: int,
mentioned_user_ids: List[int],
message_id: int) -> None:
user_ids = subscriber_map.get(recipient_id, set())
if user_ids:
for user_id in sorted(user_ids):
is_mentioned = user_id in mentioned_user_ids
# Slack and Gitter don't yet triage private messages.
# It's possible we don't even get PMs from them.
is_private = False
usermessage = build_user_message(
user_id=user_id,
message_id=message_id,
is_private=is_private,
is_mentioned=is_mentioned,
)
zerver_usermessage.append(usermessage)
def build_user_message(user_id: int,
message_id: int,
is_private: bool,
is_mentioned: bool) -> ZerverFieldsT:
flags_mask = 1 # For read
if is_mentioned:
flags_mask += 8 # For mentioned
if is_private:
flags_mask += 2048 # For is_private
id = NEXT_ID('user_message')
usermessage = dict(
id=id,
user_profile=user_id,
message=message_id,
flags_mask=flags_mask,
)
return usermessage
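# Illustrative flag arithmetic: a mentioned user on a private message gets
# flags_mask == 1 + 8 + 2048 == 2057 (read | mentioned | is_private).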
def build_defaultstream(realm_id: int, stream_id: int,
defaultstream_id: int) -> ZerverFieldsT:
defaultstream = dict(
stream=stream_id,
realm=realm_id,
id=defaultstream_id)
return defaultstream
def build_stream(date_created: Any, realm_id: int, name: str,
description: str, stream_id: int, deactivated: bool=False,
invite_only: bool=False) -> ZerverFieldsT:
stream = Stream(
name=name,
deactivated=deactivated,
description=description,
date_created=date_created,
invite_only=invite_only,
id=stream_id)
stream_dict = model_to_dict(stream,
exclude=['realm'])
stream_dict['realm'] = realm_id
return stream_dict
def build_message(topic_name: str, pub_date: float, message_id: int, content: str,
rendered_content: Optional[str], user_id: int, recipient_id: int,
has_image: bool=False, has_link: bool=False,
has_attachment: bool=True) -> ZerverFieldsT:
zulip_message = Message(
rendered_content_version=1, # this is Zulip specific
pub_date=pub_date,
id=message_id,
content=content,
rendered_content=rendered_content,
has_image=has_image,
has_attachment=has_attachment,
has_link=has_link)
zulip_message.set_topic_name(topic_name)
zulip_message_dict = model_to_dict(zulip_message,
exclude=['recipient', 'sender', 'sending_client'])
zulip_message_dict['sender'] = user_id
zulip_message_dict['sending_client'] = 1
zulip_message_dict['recipient'] = recipient_id
return zulip_message_dict
def build_attachment(realm_id: int, message_ids: Set[int],
user_id: int, fileinfo: ZerverFieldsT, s3_path: str,
zerver_attachment: List[ZerverFieldsT]) -> None:
"""
This function should be passed a 'fileinfo' dictionary, which contains
information about 'size', 'created' (created time) and ['name'] (filename).
"""
attachment_id = NEXT_ID('attachment')
attachment = Attachment(
id=attachment_id,
size=fileinfo['size'],
create_time=fileinfo['created'],
is_realm_public=True,
path_id=s3_path,
file_name=fileinfo['name'])
attachment_dict = model_to_dict(attachment,
exclude=['owner', 'messages', 'realm'])
attachment_dict['owner'] = user_id
attachment_dict['messages'] = list(message_ids)
attachment_dict['realm'] = realm_id
zerver_attachment.append(attachment_dict)
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,
threads: int, size_url_suffix: str='') -> List[ZerverFieldsT]:
"""
This function gets the avatar of the user and saves it in the
user's avatar directory with both the extensions '.png' and '.original'
Required parameters:
1. avatar_list: List of avatars to be mapped in avatars records.json file
2. avatar_dir: Folder where the downloaded avatars are saved
3. realm_id: Realm ID.
We use this for Slack and Gitter conversions, where avatars need to be
downloaded. For simpler conversions see write_avatar_png.
"""
def get_avatar(avatar_upload_item: List[str]) -> int:
avatar_url = avatar_upload_item[0]
image_path = os.path.join(avatar_dir, avatar_upload_item[1])
original_image_path = os.path.join(avatar_dir, avatar_upload_item[2])
response = requests.get(avatar_url + size_url_suffix, stream=True)
with open(image_path, 'wb') as image_file:
shutil.copyfileobj(response.raw, image_file)
shutil.copy(image_path, original_image_path)
return 0
logging.info('######### GETTING AVATARS #########\n')
logging.info('DOWNLOADING AVATARS .......\n')
avatar_original_list = []
avatar_upload_list = []
for avatar in avatar_list:
avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
avatar_url = avatar['path']
avatar_original = dict(avatar)
image_path = ('%s.png' % (avatar_hash))
original_image_path = ('%s.original' % (avatar_hash))
avatar_upload_list.append([avatar_url, image_path, original_image_path])
# We don't add the size field here in avatar's records.json,
# since the metadata is not needed on the import end, and we
# don't have it until we've downloaded the files anyway.
avatar['path'] = image_path
avatar['s3_path'] = image_path
avatar_original['path'] = original_image_path
avatar_original['s3_path'] = original_image_path
avatar_original_list.append(avatar_original)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_avatar, avatar_upload_list, threads=threads):
output.append(job)
logging.info('######### GETTING AVATARS FINISHED #########\n')
return avatar_list + avatar_original_list
def write_avatar_png(avatar_folder: str,
realm_id: int,
user_id: int,
bits: bytes) -> ZerverFieldsT:
'''
Use this function for conversions like Hipchat where
the bits for the .png file come in something like
a users.json file, and where we don't have to
fetch avatar images externally.
'''
avatar_hash = user_avatar_path_from_ids(
user_profile_id=user_id,
realm_id=realm_id,
)
image_fn = avatar_hash + '.original'
image_path = os.path.join(avatar_folder, image_fn)
with open(image_path, 'wb') as image_file:
image_file.write(bits)
# Return metadata that eventually goes in records.json.
metadata = dict(
path=image_path,
s3_path=image_path,
realm_id=realm_id,
user_profile_id=user_id,
)
return metadata
def process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,
threads: int) -> List[ZerverFieldsT]:
"""
    This function downloads the uploads and saves them in the realm's upload directory.
Required parameters:
1. upload_list: List of uploads to be mapped in uploads records.json file
2. upload_dir: Folder where the downloaded uploads are saved
"""
def get_uploads(upload: List[str]) -> int:
upload_url = upload[0]
upload_path = upload[1]
upload_path = os.path.join(upload_dir, upload_path)
response = requests.get(upload_url, stream=True)
os.makedirs(os.path.dirname(upload_path), exist_ok=True)
with open(upload_path, 'wb') as upload_file:
shutil.copyfileobj(response.raw, upload_file)
return 0
logging.info('######### GETTING ATTACHMENTS #########\n')
logging.info('DOWNLOADING ATTACHMENTS .......\n')
upload_url_list = []
for upload in upload_list:
upload_url = upload['path']
upload_s3_path = upload['s3_path']
upload_url_list.append([upload_url, upload_s3_path])
upload['path'] = upload_s3_path
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_uploads, upload_url_list, threads=threads):
output.append(job)
logging.info('######### GETTING ATTACHMENTS FINISHED #########\n')
return upload_list
def build_realm_emoji(realm_id: int,
name: str,
id: int,
file_name: str) -> ZerverFieldsT:
return model_to_dict(
RealmEmoji(
realm_id=realm_id,
name=name,
id=id,
file_name=file_name,
)
)
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:
"""
    This function downloads the custom emojis and saves them in the output emoji folder.
Required parameters:
1. zerver_realmemoji: List of all RealmEmoji objects to be imported
2. emoji_dir: Folder where the downloaded emojis are saved
3. emoji_url_map: Maps emoji name to its url
"""
def get_emojis(upload: List[str]) -> int:
emoji_url = upload[0]
emoji_path = upload[1]
upload_emoji_path = os.path.join(emoji_dir, emoji_path)
response = requests.get(emoji_url, stream=True)
os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
with open(upload_emoji_path, 'wb') as emoji_file:
shutil.copyfileobj(response.raw, emoji_file)
return 0
emoji_records = []
upload_emoji_list = []
logging.info('######### GETTING EMOJIS #########\n')
logging.info('DOWNLOADING EMOJIS .......\n')
for emoji in zerver_realmemoji:
emoji_url = emoji_url_map[emoji['name']]
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=emoji['realm'],
emoji_file_name=emoji['name'])
upload_emoji_list.append([emoji_url, emoji_path])
emoji_record = dict(emoji)
emoji_record['path'] = emoji_path
emoji_record['s3_path'] = emoji_path
emoji_record['realm_id'] = emoji_record['realm']
emoji_record.pop('realm')
emoji_records.append(emoji_record)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_emojis, upload_emoji_list, threads=threads):
output.append(job)
logging.info('######### GETTING EMOJIS FINISHED #########\n')
return emoji_records
def create_converted_data_files(data: Any, output_dir: str, file_path: str) -> None:
output_file = output_dir + file_path
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, 'w') as fp:
ujson.dump(data, fp, indent=4)
| [
"int",
"str",
"float",
"str",
"str",
"Any",
"str",
"str",
"str",
"int",
"bool",
"bool",
"bool",
"bool",
"int",
"str",
"Optional[str]",
"int",
"int",
"str",
"str",
"Any",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"int",
"int",
"int",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"Callable[..., Set[int]]",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"int",
"int",
"int",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"int",
"str",
"List[ZerverFieldsT]",
"Dict[int, Set[int]]",
"int",
"List[int]",
"int",
"int",
"int",
"bool",
"bool",
"int",
"int",
"int",
"Any",
"int",
"str",
"str",
"int",
"str",
"float",
"int",
"str",
"Optional[str]",
"int",
"int",
"int",
"Set[int]",
"int",
"ZerverFieldsT",
"str",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"str",
"int",
"int",
"List[str]",
"str",
"int",
"int",
"bytes",
"List[ZerverFieldsT]",
"str",
"int",
"List[str]",
"int",
"str",
"int",
"str",
"List[ZerverFieldsT]",
"str",
"ZerverFieldsT",
"int",
"List[str]",
"Any",
"str",
"str"
] | [
609,
631,
642,
687,
1188,
1229,
1273,
1308,
1347,
1379,
1418,
1463,
1502,
1548,
1587,
1627,
1665,
2200,
2215,
2227,
2244,
2277,
2295,
2703,
3211,
3225,
3270,
3687,
3734,
3778,
5009,
5060,
5104,
6021,
6659,
6678,
6689,
6935,
6992,
7938,
7969,
8003,
9094,
9154,
9212,
9260,
9306,
9950,
9990,
10030,
10073,
10481,
10497,
10544,
10729,
10744,
10755,
10790,
10806,
11273,
11288,
11307,
11321,
11362,
11386,
11405,
12231,
12249,
12289,
12304,
12328,
12373,
13172,
13205,
13220,
13254,
13841,
15612,
15648,
15683,
15715,
16496,
16529,
16563,
16891,
17925,
17958,
17989,
18027,
18257,
18289,
18328,
18352,
18718,
20110,
20127,
20143
] | [
612,
634,
647,
690,
1191,
1232,
1276,
1311,
1350,
1382,
1422,
1467,
1506,
1552,
1590,
1630,
1678,
2203,
2218,
2230,
2247,
2280,
2314,
2722,
3214,
3228,
3273,
3706,
3753,
3797,
5032,
5079,
5123,
6040,
6662,
6681,
6692,
6954,
7011,
7957,
7972,
8006,
9113,
9173,
9215,
9269,
9309,
9953,
9993,
10034,
10077,
10484,
10500,
10547,
10732,
10747,
10758,
10793,
10809,
11276,
11293,
11310,
11324,
11375,
11389,
11408,
12234,
12257,
12292,
12317,
12331,
12392,
13191,
13208,
13223,
13257,
13850,
15615,
15651,
15686,
15720,
16515,
16532,
16566,
16900,
17928,
17961,
17992,
18030,
18276,
18292,
18341,
18355,
18727,
20113,
20130,
20146
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/sequencer.py | from typing import Any, Callable, Dict
'''
This module helps you set up a bunch
of sequences, similar to how database
sequences work.
You need to be a bit careful here, since
you're dealing with a big singleton, but
for data imports that's usually easy to
manage. See hipchat.py for example usage.
'''
def _seq() -> Callable[[], int]:
i = 0
def next_one() -> int:
nonlocal i
i += 1
return i
return next_one
def sequencer() -> Callable[[str], int]:
'''
Use like this:
NEXT_ID = sequencer()
message_id = NEXT_ID('message')
'''
seq_dict = dict() # type: Dict[str, Callable[[], int]]
def next_one(name: str) -> int:
if name not in seq_dict:
seq_dict[name] = _seq()
seq = seq_dict[name]
return seq()
return next_one
'''
NEXT_ID is a singleton used by an entire process, which is
almost always reasonable. If you want to have two parallel
sequences, just use different `name` values.
This object gets created once and only once during the first
import of the file.
'''
NEXT_ID = sequencer()
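# Illustrative usage: each name gets an independent counter.
#   NEXT_ID('message')  # -> 1
#   NEXT_ID('message')  # -> 2
#   NEXT_ID('stream')   # -> 1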
def is_int(key: Any) -> bool:
try:
n = int(key)
except ValueError:
return False
return n <= 999999999
class IdMapper:
def __init__(self) -> None:
self.map = dict() # type: Dict[Any, int]
self.cnt = 0
def has(self, their_id: Any) -> bool:
return their_id in self.map
def get(self, their_id: Any) -> int:
if their_id in self.map:
return self.map[their_id]
if is_int(their_id):
our_id = int(their_id)
if self.cnt > 0:
raise Exception('mixed key styles')
else:
self.cnt += 1
our_id = self.cnt
self.map[their_id] = our_id
return our_id
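    # Illustrative usage: opaque (non-integer) keys get small sequential ids,
    # and repeat lookups are stable.
    #   mapper = IdMapper()
    #   mapper.get('abc')  # -> 1
    #   mapper.get('abc')  # -> 1
    #   mapper.has('xyz')  # -> False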
| [
"str",
"Any",
"Any",
"Any"
] | [
685,
1133,
1397,
1476
] | [
688,
1136,
1400,
1479
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/slack.py | import os
import ujson
import hashlib
import sys
import argparse
import shutil
import subprocess
import re
import logging
import random
import requests
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from django.forms.models import model_to_dict
from typing import Any, Dict, List, Optional, Tuple, Set
from zerver.forms import check_subdomain_available
from zerver.models import Reaction, RealmEmoji, Realm, UserProfile, Recipient, \
CustomProfileField, CustomProfileFieldValue
from zerver.data_import.slack_message_conversion import convert_to_zulip_markdown, \
get_user_full_name
from zerver.data_import.import_util import ZerverFieldsT, build_zerver_realm, \
build_avatar, build_subscription, build_recipient, build_usermessages, \
build_defaultstream, build_attachment, process_avatars, process_uploads, \
process_emojis, build_realm, build_stream, build_message, \
create_converted_data_files, make_subscriber_map
from zerver.data_import.sequencer import NEXT_ID
from zerver.lib.parallel import run_parallel
from zerver.lib.upload import random_name, sanitize_name
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE
from zerver.lib.emoji import NAME_TO_CODEPOINT_PATH
# stubs
AddedUsersT = Dict[str, int]
AddedChannelsT = Dict[str, Tuple[str, int]]
AddedRecipientsT = Dict[str, int]
def rm_tree(path: str) -> None:
if os.path.exists(path):
shutil.rmtree(path)
def slack_workspace_to_realm(domain_name: str, realm_id: int, user_list: List[ZerverFieldsT],
realm_subdomain: str, slack_data_dir: str,
custom_emoji_list: ZerverFieldsT)-> Tuple[ZerverFieldsT, AddedUsersT,
AddedRecipientsT,
AddedChannelsT,
List[ZerverFieldsT],
ZerverFieldsT]:
"""
Returns:
1. realm, Converted Realm data
2. added_users, which is a dictionary to map from slack user id to zulip user id
3. added_recipient, which is a dictionary to map from channel name to zulip recipient_id
4. added_channels, which is a dictionary to map from channel name to channel id, zulip stream_id
    5. avatars, which is a list to map avatars to zulip avatar records.json
    6. emoji_url_map, which maps emoji name to its slack url
"""
NOW = float(timezone_now().timestamp())
zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'Slack') # type: List[ZerverFieldsT]
realm = build_realm(zerver_realm, realm_id, domain_name)
zerver_userprofile, avatars, added_users, zerver_customprofilefield, \
zerver_customprofilefield_value = users_to_zerver_userprofile(slack_data_dir, user_list,
realm_id, int(NOW), domain_name)
channels_to_zerver_stream_fields = channels_to_zerver_stream(slack_data_dir,
realm_id,
added_users,
zerver_userprofile)
zerver_realmemoji, emoji_url_map = build_realmemoji(custom_emoji_list, realm_id)
realm['zerver_realmemoji'] = zerver_realmemoji
# See https://zulipchat.com/help/set-default-streams-for-new-users
# for documentation on zerver_defaultstream
realm['zerver_userprofile'] = zerver_userprofile
# Custom profile fields
realm['zerver_customprofilefield'] = zerver_customprofilefield
realm['zerver_customprofilefieldvalue'] = zerver_customprofilefield_value
realm['zerver_defaultstream'] = channels_to_zerver_stream_fields[0]
realm['zerver_stream'] = channels_to_zerver_stream_fields[1]
realm['zerver_subscription'] = channels_to_zerver_stream_fields[3]
realm['zerver_recipient'] = channels_to_zerver_stream_fields[4]
added_channels = channels_to_zerver_stream_fields[2]
added_recipient = channels_to_zerver_stream_fields[5]
return realm, added_users, added_recipient, added_channels, avatars, emoji_url_map
def build_realmemoji(custom_emoji_list: ZerverFieldsT,
realm_id: int) -> Tuple[List[ZerverFieldsT],
ZerverFieldsT]:
zerver_realmemoji = []
emoji_url_map = {}
emoji_id = 0
for emoji_name, url in custom_emoji_list.items():
if 'emoji.slack-edge.com' in url:
# Some of the emojis we get from the api have invalid links
# this is to prevent errors related to them
realmemoji = RealmEmoji(
name=emoji_name,
id=emoji_id,
file_name=os.path.basename(url),
deactivated=False)
realmemoji_dict = model_to_dict(realmemoji, exclude=['realm', 'author'])
realmemoji_dict['author'] = None
realmemoji_dict['realm'] = realm_id
emoji_url_map[emoji_name] = url
zerver_realmemoji.append(realmemoji_dict)
emoji_id += 1
return zerver_realmemoji, emoji_url_map
def users_to_zerver_userprofile(slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int,
timestamp: Any, domain_name: str) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT],
AddedUsersT,
List[ZerverFieldsT],
List[ZerverFieldsT]]:
"""
Returns:
1. zerver_userprofile, which is a list of user profile
    2. avatar_list, which is a list to map avatars to zulip avatar records.json
3. added_users, which is a dictionary to map from slack user id to zulip
user id
4. zerver_customprofilefield, which is a list of all custom profile fields
5. zerver_customprofilefield_values, which is a list of user profile fields
"""
logging.info('######### IMPORTING USERS STARTED #########\n')
zerver_userprofile = []
zerver_customprofilefield = [] # type: List[ZerverFieldsT]
zerver_customprofilefield_values = [] # type: List[ZerverFieldsT]
avatar_list = [] # type: List[ZerverFieldsT]
added_users = {}
# The user data we get from the slack api does not contain custom profile data
# Hence we get it from the slack zip file
slack_data_file_user_list = get_data_file(slack_data_dir + '/users.json')
# To map user id with the custom profile fields of the corresponding user
slack_user_custom_field_map = {} # type: ZerverFieldsT
# To store custom fields corresponding to their ids
custom_field_map = {} # type: ZerverFieldsT
for user in slack_data_file_user_list:
process_slack_custom_fields(user, slack_user_custom_field_map)
# We have only one primary owner in slack, see link
# https://get.slack.help/hc/en-us/articles/201912948-Owners-and-Administrators
# This is to import the primary owner first from all the users
user_id_count = custom_field_id_count = customprofilefield_id = 0
primary_owner_id = user_id_count
user_id_count += 1
for user in users:
slack_user_id = user['id']
if user.get('is_primary_owner', False):
user_id = primary_owner_id
else:
user_id = user_id_count
# email
email = get_user_email(user, domain_name)
# avatar
# ref: https://chat.zulip.org/help/change-your-avatar
avatar_url = build_avatar_url(slack_user_id, user['team_id'],
user['profile']['avatar_hash'])
build_avatar(user_id, realm_id, email, avatar_url, timestamp, avatar_list)
# check if user is the admin
realm_admin = get_admin(user)
# timezone
timezone = get_user_timezone(user)
# Check for custom profile fields
if slack_user_id in slack_user_custom_field_map:
# For processing the fields
custom_field_map, customprofilefield_id = build_customprofile_field(
zerver_customprofilefield, slack_user_custom_field_map[slack_user_id],
customprofilefield_id, realm_id, custom_field_map)
# Store the custom field values for the corresponding user
custom_field_id_count = build_customprofilefields_values(
custom_field_map, slack_user_custom_field_map[slack_user_id], user_id,
custom_field_id_count, zerver_customprofilefield_values)
userprofile = UserProfile(
full_name=get_user_full_name(user),
short_name=user['name'],
is_active=not user['deleted'],
id=user_id,
email=email,
delivery_email=email,
avatar_source='U',
is_bot=user.get('is_bot', False),
pointer=-1,
is_realm_admin=realm_admin,
bot_type=1 if user.get('is_bot', False) else None,
date_joined=timestamp,
timezone=timezone,
last_login=timestamp)
userprofile_dict = model_to_dict(userprofile)
# Set realm id separately as the corresponding realm is not yet a Realm model instance
userprofile_dict['realm'] = realm_id
zerver_userprofile.append(userprofile_dict)
added_users[slack_user_id] = user_id
if not user.get('is_primary_owner', False):
user_id_count += 1
logging.info(u"{} -> {}".format(user['name'], userprofile_dict['email']))
process_customprofilefields(zerver_customprofilefield, zerver_customprofilefield_values)
logging.info('######### IMPORTING USERS FINISHED #########\n')
return zerver_userprofile, avatar_list, added_users, zerver_customprofilefield, \
zerver_customprofilefield_values
def build_customprofile_field(customprofile_field: List[ZerverFieldsT], fields: ZerverFieldsT,
customprofilefield_id: int, realm_id: int,
custom_field_map: ZerverFieldsT) -> Tuple[ZerverFieldsT, int]:
# The name of the custom profile field is not provided in the slack data
# Hash keys of the fields are provided
# Reference: https://api.slack.com/methods/users.profile.set
for field, value in fields.items():
if field not in custom_field_map:
slack_custom_fields = ['phone', 'skype']
if field in slack_custom_fields:
field_name = field
else:
field_name = ("slack custom field %s" % str(customprofilefield_id + 1))
customprofilefield = CustomProfileField(
id=customprofilefield_id,
name=field_name,
field_type=1 # For now this is defaulted to 'SHORT_TEXT'
# Processing is done in the function 'process_customprofilefields'
)
customprofilefield_dict = model_to_dict(customprofilefield,
exclude=['realm'])
customprofilefield_dict['realm'] = realm_id
custom_field_map[field] = customprofilefield_id
customprofilefield_id += 1
customprofile_field.append(customprofilefield_dict)
return custom_field_map, customprofilefield_id
def process_slack_custom_fields(user: ZerverFieldsT,
slack_user_custom_field_map: ZerverFieldsT) -> None:
slack_user_custom_field_map[user['id']] = {}
if user['profile'].get('fields'):
slack_user_custom_field_map[user['id']] = user['profile']['fields']
slack_custom_fields = ['phone', 'skype']
for field in slack_custom_fields:
if field in user['profile']:
slack_user_custom_field_map[user['id']][field] = {'value': user['profile'][field]}
def build_customprofilefields_values(custom_field_map: ZerverFieldsT, fields: ZerverFieldsT,
user_id: int, custom_field_id: int,
custom_field_values: List[ZerverFieldsT]) -> int:
for field, value in fields.items():
custom_field_value = CustomProfileFieldValue(
id=custom_field_id,
value=value['value'])
custom_field_value_dict = model_to_dict(custom_field_value,
exclude=['user_profile', 'field'])
custom_field_value_dict['user_profile'] = user_id
custom_field_value_dict['field'] = custom_field_map[field]
custom_field_values.append(custom_field_value_dict)
custom_field_id += 1
return custom_field_id
def process_customprofilefields(customprofilefield: List[ZerverFieldsT],
customprofilefield_value: List[ZerverFieldsT]) -> None:
# Process the field types by checking all field values
for field in customprofilefield:
for field_value in customprofilefield_value:
if field_value['field'] == field['id'] and len(field_value['value']) > 50:
field['field_type'] = 2 # corresponding to Long text
break
def get_user_email(user: ZerverFieldsT, domain_name: str) -> str:
if 'email' in user['profile']:
return user['profile']['email']
if 'bot_id' in user['profile']:
if 'real_name_normalized' in user['profile']:
slack_bot_name = user['profile']['real_name_normalized']
elif 'first_name' in user['profile']:
slack_bot_name = user['profile']['first_name']
else:
raise AssertionError("Could not identify bot type")
return slack_bot_name.replace("Bot", "").replace(" ", "") + "-bot@%s" % (domain_name,)
if get_user_full_name(user) == "slackbot":
return "imported-slackbot-bot@%s" % (domain_name,)
raise AssertionError("Could not find email address for Slack user %s" % (user,))
def build_avatar_url(slack_user_id: str, team_id: str, avatar_hash: str) -> str:
avatar_url = "https://ca.slack-edge.com/{}-{}-{}".format(team_id, slack_user_id,
avatar_hash)
return avatar_url
def get_admin(user: ZerverFieldsT) -> bool:
admin = user.get('is_admin', False)
owner = user.get('is_owner', False)
primary_owner = user.get('is_primary_owner', False)
if admin or owner or primary_owner:
return True
return False
def get_user_timezone(user: ZerverFieldsT) -> str:
_default_timezone = "America/New_York"
timezone = user.get("tz", _default_timezone)
if timezone is None or '/' not in timezone:
timezone = _default_timezone
return timezone
def channels_to_zerver_stream(slack_data_dir: str, realm_id: int, added_users: AddedUsersT,
zerver_userprofile: List[ZerverFieldsT]) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT],
AddedChannelsT,
List[ZerverFieldsT],
List[ZerverFieldsT],
AddedRecipientsT]:
"""
Returns:
1. zerver_defaultstream, which is a list of the default streams
    2. zerver_stream, which is a list of all streams
3. added_channels, which is a dictionary to map from channel name to channel id, zulip stream_id
4. zerver_subscription, which is a list of the subscriptions
5. zerver_recipient, which is a list of the recipients
6. added_recipient, which is a dictionary to map from channel name to zulip recipient_id
"""
logging.info('######### IMPORTING CHANNELS STARTED #########\n')
channels = get_data_file(slack_data_dir + '/channels.json')
added_channels = {}
added_recipient = {}
zerver_stream = []
zerver_subscription = [] # type: List[ZerverFieldsT]
zerver_recipient = []
zerver_defaultstream = []
stream_id_count = subscription_id_count = recipient_id_count = defaultstream_id = 0
for channel in channels:
# slack_channel_id = channel['id']
# map Slack's topic and purpose content into Zulip's stream description.
# WARN This mapping is lossy since the topic.creator, topic.last_set,
# purpose.creator, purpose.last_set fields are not preserved.
description = channel["purpose"]["value"]
stream_id = stream_id_count
recipient_id = recipient_id_count
# construct the stream object and append it to zerver_stream
stream = build_stream(float(channel["created"]), realm_id, channel["name"],
description, stream_id, channel["is_archived"])
zerver_stream.append(stream)
# construct defaultstream object
        # slack has the default channels 'general' and 'random',
# where every user is subscribed
default_channels = ['general', 'random'] # Slack specific
if channel['name'] in default_channels:
defaultstream = build_defaultstream(realm_id, stream_id,
defaultstream_id)
zerver_defaultstream.append(defaultstream)
defaultstream_id += 1
added_channels[stream['name']] = (channel['id'], stream_id)
recipient = build_recipient(stream_id, recipient_id, Recipient.STREAM)
zerver_recipient.append(recipient)
added_recipient[stream['name']] = recipient_id
        # TODO add recipients for private messages and huddles
# construct the subscription object and append it to zerver_subscription
subscription_id_count = get_subscription(channel['members'], zerver_subscription,
recipient_id, added_users,
subscription_id_count)
# TODO add zerver_subscription which correspond to
# huddles type recipient
# For huddles:
# sub['recipient']=recipient['id'] where recipient['type_id']=added_users[member]
stream_id_count += 1
recipient_id_count += 1
logging.info(u"{} -> created".format(channel['name']))
# TODO map Slack's pins to Zulip's stars
    # Slack's security model makes pins known to the team owner,
    # as evident from where they are stored (under channels):
# "pins": [
# {
# "id": "1444755381.000003",
# "type": "C",
# "user": "U061A5N1G",
# "owner": "U061A5N1G",
# "created": "1444755463"
# }
# ],
for user in zerver_userprofile:
# this maps the recipients and subscriptions
# related to private messages
recipient = build_recipient(user['id'], recipient_id_count, Recipient.PERSONAL)
sub = build_subscription(recipient_id_count, user['id'], subscription_id_count)
zerver_recipient.append(recipient)
zerver_subscription.append(sub)
subscription_id_count += 1
recipient_id_count += 1
logging.info('######### IMPORTING STREAMS FINISHED #########\n')
return zerver_defaultstream, zerver_stream, added_channels, zerver_subscription, \
zerver_recipient, added_recipient
def get_subscription(channel_members: List[str], zerver_subscription: List[ZerverFieldsT],
recipient_id: int, added_users: AddedUsersT,
subscription_id: int) -> int:
for member in channel_members:
sub = build_subscription(recipient_id, added_users[member], subscription_id)
        # The recipient corresponds to a stream for stream-readable messages.
zerver_subscription.append(sub)
subscription_id += 1
return subscription_id
def convert_slack_workspace_messages(slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int,
added_users: AddedUsersT, added_recipient: AddedRecipientsT,
added_channels: AddedChannelsT, realm: ZerverFieldsT,
zerver_realmemoji: List[ZerverFieldsT], domain_name: str,
output_dir: str,
chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT],
List[ZerverFieldsT]]:
"""
Returns:
1. reactions, which is a list of the reactions
2. uploads, which is a list of uploads to be mapped in uploads records.json
3. attachment, which is a list of the attachments
"""
all_messages = get_all_messages(slack_data_dir, added_channels)
    # We sort the messages by timestamp so that they are
    # shown in the proper date order
all_messages = sorted(all_messages, key=lambda message: message['ts'])
logging.info('######### IMPORTING MESSAGES STARTED #########\n')
total_reactions = [] # type: List[ZerverFieldsT]
total_attachments = [] # type: List[ZerverFieldsT]
total_uploads = [] # type: List[ZerverFieldsT]
# The messages are stored in batches
low_index = 0
upper_index = low_index + chunk_size
dump_file_id = 1
subscriber_map = make_subscriber_map(
zerver_subscription=realm['zerver_subscription'],
)
while True:
message_data = all_messages[low_index:upper_index]
if len(message_data) == 0:
break
zerver_message, zerver_usermessage, attachment, uploads, reactions = \
channel_message_to_zerver_message(
realm_id, users, added_users, added_recipient, message_data,
zerver_realmemoji, subscriber_map, added_channels,
domain_name)
message_json = dict(
zerver_message=zerver_message,
zerver_usermessage=zerver_usermessage)
message_file = "/messages-%06d.json" % (dump_file_id,)
logging.info("Writing Messages to %s\n" % (output_dir + message_file))
create_converted_data_files(message_json, output_dir, message_file)
total_reactions += reactions
total_attachments += attachment
total_uploads += uploads
low_index = upper_index
upper_index = chunk_size + low_index
dump_file_id += 1
logging.info('######### IMPORTING MESSAGES FINISHED #########\n')
return total_reactions, total_uploads, total_attachments
def get_all_messages(slack_data_dir: str, added_channels: AddedChannelsT) -> List[ZerverFieldsT]:
all_messages = [] # type: List[ZerverFieldsT]
for channel_name in added_channels.keys():
channel_dir = os.path.join(slack_data_dir, channel_name)
json_names = os.listdir(channel_dir)
for json_name in json_names:
message_dir = os.path.join(channel_dir, json_name)
messages = get_data_file(message_dir)
for message in messages:
# To give every message the channel information
message['channel_name'] = channel_name
all_messages += messages
return all_messages
def channel_message_to_zerver_message(realm_id: int,
users: List[ZerverFieldsT],
added_users: AddedUsersT,
added_recipient: AddedRecipientsT,
all_messages: List[ZerverFieldsT],
zerver_realmemoji: List[ZerverFieldsT],
subscriber_map: Dict[int, Set[int]],
added_channels: AddedChannelsT,
domain_name: str) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT],
List[ZerverFieldsT],
List[ZerverFieldsT],
List[ZerverFieldsT]]:
"""
Returns:
1. zerver_message, which is a list of the messages
2. zerver_usermessage, which is a list of the usermessages
3. zerver_attachment, which is a list of the attachments
4. uploads_list, which is a list of uploads to be mapped in uploads records.json
5. reaction_list, which is a list of all user reactions
"""
zerver_message = []
zerver_usermessage = [] # type: List[ZerverFieldsT]
uploads_list = [] # type: List[ZerverFieldsT]
zerver_attachment = [] # type: List[ZerverFieldsT]
reaction_list = [] # type: List[ZerverFieldsT]
# For unicode emoji
with open(NAME_TO_CODEPOINT_PATH) as fp:
name_to_codepoint = ujson.load(fp)
for message in all_messages:
user = get_message_sending_user(message)
if not user:
# Ignore messages without user names
            # These are sometimes produced by Slack
continue
subtype = message.get('subtype', False)
if subtype in [
# Zulip doesn't have a pinned_item concept
"pinned_item",
"unpinned_item",
# Slack's channel join/leave notices are spammy
"channel_join",
"channel_leave",
"channel_name"
]:
continue
try:
content, mentioned_user_ids, has_link = convert_to_zulip_markdown(
message['text'], users, added_channels, added_users)
except Exception:
print("Slack message unexpectedly missing text representation:")
print(ujson.dumps(message, indent=4))
continue
rendered_content = None
recipient_id = added_recipient[message['channel_name']]
message_id = NEXT_ID('message')
# Process message reactions
if 'reactions' in message.keys():
build_reactions(reaction_list, message['reactions'], added_users,
message_id, name_to_codepoint,
zerver_realmemoji)
# Process different subtypes of slack messages
# Subtypes which have only the action in the message should
# be rendered with '/me' in the content initially
# For example "sh_room_created" has the message 'started a call'
# which should be displayed as '/me started a call'
if subtype in ["bot_add", "sh_room_created", "me_message"]:
content = ('/me %s' % (content))
if subtype == 'file_comment':
# The file_comment message type only indicates the
# responsible user in a subfield.
message['user'] = message['comment']['user']
file_info = process_message_files(
message=message,
domain_name=domain_name,
realm_id=realm_id,
message_id=message_id,
user=user,
users=users,
added_users=added_users,
zerver_attachment=zerver_attachment,
uploads_list=uploads_list,
)
content += file_info['content']
has_link = has_link or file_info['has_link']
has_attachment = file_info['has_attachment']
has_image = file_info['has_image']
# construct message
topic_name = 'imported from slack'
zulip_message = build_message(topic_name, float(message['ts']), message_id, content,
rendered_content, added_users[user], recipient_id,
has_image, has_link, has_attachment)
zerver_message.append(zulip_message)
# construct usermessages
build_usermessages(
zerver_usermessage=zerver_usermessage,
subscriber_map=subscriber_map,
recipient_id=recipient_id,
mentioned_user_ids=mentioned_user_ids,
message_id=message_id,
)
return zerver_message, zerver_usermessage, zerver_attachment, uploads_list, \
reaction_list
def process_message_files(message: ZerverFieldsT,
domain_name: str,
realm_id: int,
message_id: int,
user: str,
users: List[ZerverFieldsT],
added_users: AddedUsersT,
zerver_attachment: List[ZerverFieldsT],
uploads_list: List[ZerverFieldsT]) -> Dict[str, Any]:
has_attachment = False
has_image = False
has_link = False
files = message.get('files', [])
subtype = message.get('subtype')
if subtype == 'file_share':
        # In Slack, an upload is indicated either by the 'file_share'
        # subtype or by upload information under the 'files' key.
files = [message['file']]
markdown_links = []
for fileinfo in files:
url = fileinfo['url_private']
if 'files.slack.com' in url:
# For attachments with slack download link
has_attachment = True
has_link = True
            has_image = 'image' in fileinfo['mimetype']
file_user = [iterate_user for iterate_user in users if message['user'] == iterate_user['id']]
file_user_email = get_user_email(file_user[0], domain_name)
s3_path, content_for_link = get_attachment_path_and_content(fileinfo, realm_id)
markdown_links.append(content_for_link)
# construct attachments
build_uploads(added_users[user], realm_id, file_user_email, fileinfo, s3_path,
uploads_list)
build_attachment(realm_id, {message_id}, added_users[user],
fileinfo, s3_path, zerver_attachment)
else:
# For attachments with link not from slack
# Example: Google drive integration
has_link = True
if 'title' in fileinfo:
file_name = fileinfo['title']
else:
file_name = fileinfo['name']
markdown_links.append('[%s](%s)' % (file_name, fileinfo['url_private']))
content = '\n'.join(markdown_links)
return dict(
content=content,
has_attachment=has_attachment,
has_image=has_image,
has_link=has_link,
)
def get_attachment_path_and_content(fileinfo: ZerverFieldsT, realm_id: int) -> Tuple[str,
str]:
# Should be kept in sync with its equivalent in zerver/lib/uploads in the function
# 'upload_message_file'
s3_path = "/".join([
str(realm_id),
'SlackImportAttachment', # This is a special placeholder which should be kept
# in sync with 'exports.py' function 'import_message_data'
format(random.randint(0, 255), 'x'),
random_name(18),
sanitize_name(fileinfo['name'])
])
attachment_path = ('/user_uploads/%s' % (s3_path))
content = '[%s](%s)' % (fileinfo['title'], attachment_path)
return s3_path, content
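# Illustrative result (not from the original module; the hex byte and the
# random name component vary per call):
#     get_attachment_path_and_content({'name': 'photo.jpg', 'title': 'photo'}, 1)
#     -> ('1/SlackImportAttachment/<hex>/<random>/photo.jpg',
#         '[photo](/user_uploads/1/SlackImportAttachment/<hex>/<random>/photo.jpg)')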
def build_reactions(reaction_list: List[ZerverFieldsT], reactions: List[ZerverFieldsT],
added_users: AddedUsersT, message_id: int,
name_to_codepoint: ZerverFieldsT,
zerver_realmemoji: List[ZerverFieldsT]) -> None:
realmemoji = {}
for realm_emoji in zerver_realmemoji:
realmemoji[realm_emoji['name']] = realm_emoji['id']
# For the unicode emoji codes, we use equivalent of
# function 'emoji_name_to_emoji_code' in 'zerver/lib/emoji' here
for slack_reaction in reactions:
emoji_name = slack_reaction['name']
# Check in unicode emoji
if emoji_name in name_to_codepoint:
emoji_code = name_to_codepoint[emoji_name]
reaction_type = Reaction.UNICODE_EMOJI
# Check in realm emoji
elif emoji_name in realmemoji:
emoji_code = realmemoji[emoji_name]
reaction_type = Reaction.REALM_EMOJI
else:
continue
for user in slack_reaction['users']:
reaction_id = NEXT_ID('reaction')
reaction = Reaction(
id=reaction_id,
emoji_code=emoji_code,
emoji_name=emoji_name,
reaction_type=reaction_type)
reaction_dict = model_to_dict(reaction,
exclude=['message', 'user_profile'])
reaction_dict['message'] = message_id
reaction_dict['user_profile'] = added_users[user]
reaction_list.append(reaction_dict)
def build_uploads(user_id: int, realm_id: int, email: str, fileinfo: ZerverFieldsT, s3_path: str,
uploads_list: List[ZerverFieldsT]) -> None:
upload = dict(
path=fileinfo['url_private'], # Save slack's url here, which is used later while processing
realm_id=realm_id,
content_type=None,
user_profile_id=user_id,
last_modified=fileinfo['timestamp'],
user_profile_email=email,
s3_path=s3_path,
size=fileinfo['size'])
uploads_list.append(upload)
def get_message_sending_user(message: ZerverFieldsT) -> Optional[str]:
if 'user' in message:
return message['user']
if message.get('file'):
return message['file'].get('user')
return None
def do_convert_data(slack_zip_file: str, output_dir: str, token: str, threads: int=6) -> None:
# Subdomain is set by the user while running the import command
realm_subdomain = ""
realm_id = 0
domain_name = settings.EXTERNAL_HOST
slack_data_dir = slack_zip_file.replace('.zip', '')
if not os.path.exists(slack_data_dir):
os.makedirs(slack_data_dir)
os.makedirs(output_dir, exist_ok=True)
# output directory should be empty initially
if os.listdir(output_dir):
raise Exception('Output directory should be empty!')
subprocess.check_call(['unzip', '-q', slack_zip_file, '-d', slack_data_dir])
# with zipfile.ZipFile(slack_zip_file, 'r') as zip_ref:
# zip_ref.extractall(slack_data_dir)
    # We get the user data via the legacy-token method of the Slack API; that
    # method is deprecated, but it is the only one that provides user email data.
user_list = get_slack_api_data(token, "https://slack.com/api/users.list", "members")
# Get custom emoji from slack api
custom_emoji_list = get_slack_api_data(token, "https://slack.com/api/emoji.list", "emoji")
realm, added_users, added_recipient, added_channels, avatar_list, \
emoji_url_map = slack_workspace_to_realm(domain_name, realm_id, user_list,
realm_subdomain,
slack_data_dir, custom_emoji_list)
reactions, uploads_list, zerver_attachment = convert_slack_workspace_messages(
slack_data_dir, user_list, realm_id, added_users, added_recipient, added_channels,
realm, realm['zerver_realmemoji'], domain_name, output_dir)
# Move zerver_reactions to realm.json file
realm['zerver_reaction'] = reactions
emoji_folder = os.path.join(output_dir, 'emoji')
os.makedirs(emoji_folder, exist_ok=True)
emoji_records = process_emojis(realm['zerver_realmemoji'], emoji_folder, emoji_url_map, threads)
avatar_folder = os.path.join(output_dir, 'avatars')
avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))
os.makedirs(avatar_realm_folder, exist_ok=True)
avatar_records = process_avatars(avatar_list, avatar_folder, realm_id, threads, size_url_suffix='-512')
uploads_folder = os.path.join(output_dir, 'uploads')
os.makedirs(os.path.join(uploads_folder, str(realm_id)), exist_ok=True)
uploads_records = process_uploads(uploads_list, uploads_folder, threads)
attachment = {"zerver_attachment": zerver_attachment}
    # Write realm data to realm.json
create_converted_data_files(realm, output_dir, '/realm.json')
    # Write emoji records
create_converted_data_files(emoji_records, output_dir, '/emoji/records.json')
    # Write avatar records
create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')
    # Write upload records
create_converted_data_files(uploads_records, output_dir, '/uploads/records.json')
    # Write attachment records
create_converted_data_files(attachment, output_dir, '/attachment.json')
# remove slack dir
rm_tree(slack_data_dir)
subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])
logging.info('######### DATA CONVERSION FINISHED #########\n')
logging.info("Zulip data dump created at %s" % (output_dir))
def get_data_file(path: str) -> Any:
with open(path, "r") as fp:
data = ujson.load(fp)
return data
def get_slack_api_data(token: str, slack_api_url: str, get_param: str) -> Any:
data = requests.get('%s?token=%s' % (slack_api_url, token))
    if data.status_code == requests.codes.ok:
        # Parse the JSON payload once, rather than calling data.json() twice.
        result = data.json()
        if 'error' in result:
            raise Exception('Enter a valid token!')
        return result[get_param]
    else:
        raise Exception('Something went wrong. Please try again!')
| [
"str",
"str",
"int",
"List[ZerverFieldsT]",
"str",
"str",
"ZerverFieldsT",
"ZerverFieldsT",
"int",
"str",
"List[ZerverFieldsT]",
"int",
"Any",
"str",
"List[ZerverFieldsT]",
"ZerverFieldsT",
"int",
"int",
"ZerverFieldsT",
"ZerverFieldsT",
"ZerverFieldsT",
"ZerverFieldsT",
"ZerverFieldsT",
"int",
"int",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"ZerverFieldsT",
"str",
"str",
"str",
"str",
"ZerverFieldsT",
"ZerverFieldsT",
"str",
"int",
"AddedUsersT",
"List[ZerverFieldsT]",
"List[str]",
"List[ZerverFieldsT]",
"int",
"AddedUsersT",
"int",
"str",
"List[ZerverFieldsT]",
"int",
"AddedUsersT",
"AddedRecipientsT",
"AddedChannelsT",
"ZerverFieldsT",
"List[ZerverFieldsT]",
"str",
"str",
"str",
"AddedChannelsT",
"int",
"List[ZerverFieldsT]",
"AddedUsersT",
"AddedRecipientsT",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"Dict[int, Set[int]]",
"AddedChannelsT",
"str",
"ZerverFieldsT",
"str",
"int",
"int",
"str",
"List[ZerverFieldsT]",
"AddedUsersT",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"ZerverFieldsT",
"int",
"List[ZerverFieldsT]",
"List[ZerverFieldsT]",
"AddedUsersT",
"int",
"ZerverFieldsT",
"List[ZerverFieldsT]",
"int",
"int",
"str",
"ZerverFieldsT",
"str",
"List[ZerverFieldsT]",
"ZerverFieldsT",
"str",
"str",
"str",
"str",
"str",
"str",
"str"
] | [
1410,
1524,
1539,
1555,
1622,
1643,
1696,
4396,
4442,
5409,
5421,
5452,
5500,
5518,
10283,
10312,
10380,
10395,
10448,
11766,
11842,
12301,
12324,
12385,
12407,
12470,
13106,
13185,
13569,
13597,
14350,
14364,
14382,
14597,
14864,
15131,
15146,
15164,
15227,
19983,
20015,
20071,
20089,
20140,
20500,
20512,
20543,
20598,
20628,
20699,
20722,
20793,
20827,
20881,
23294,
23315,
23979,
24029,
24101,
24169,
24239,
24317,
24392,
24467,
24534,
28960,
29014,
29055,
29098,
29135,
29173,
29233,
29291,
29352,
31306,
31331,
32085,
32117,
32171,
32196,
32240,
32294,
33639,
33654,
33666,
33681,
33705,
33742,
34185,
34399,
34416,
34428,
37711,
37837,
37857,
37873
] | [
1413,
1527,
1542,
1574,
1625,
1646,
1709,
4409,
4445,
5412,
5440,
5455,
5503,
5521,
10302,
10325,
10383,
10398,
10461,
11779,
11855,
12314,
12337,
12388,
12410,
12489,
13125,
13204,
13582,
13600,
14353,
14367,
14385,
14610,
14877,
15134,
15149,
15175,
15246,
19992,
20034,
20074,
20100,
20143,
20503,
20531,
20546,
20609,
20644,
20713,
20735,
20812,
20830,
20884,
23297,
23329,
23982,
24048,
24112,
24185,
24258,
24336,
24411,
24481,
24537,
28973,
29017,
29058,
29101,
29138,
29192,
29244,
29310,
29371,
31319,
31334,
32104,
32136,
32182,
32199,
32253,
32313,
33642,
33657,
33669,
33694,
33708,
33761,
34198,
34402,
34419,
34431,
37714,
37840,
37860,
37876
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/data_import/slack_message_conversion.py | import re
from typing import Any, Dict, Tuple, List, Optional
# stubs
ZerverFieldsT = Dict[str, Any]
AddedUsersT = Dict[str, int]
AddedChannelsT = Dict[str, Tuple[str, int]]
# A Slack link can be in the format <http://www.foo.com|www.foo.com> or <http://foo.com/>
LINK_REGEX = r"""
    (<) # match '<'
(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/|ftp:\/\/)? # protocol and www
([a-z0-9]+([\-\.]{1}[a-z0-9]+)*)(\.) # domain name
([a-z]{2,63}(:[0-9]{1,5})?) # domain
(\/[^>]*)? # path
    (\|)?(?:\|([^>]+))? # text after the pipe (for Slack links)
(>)
"""
SLACK_MAILTO_REGEX = r"""
<((mailto:)? # match `<mailto:`
([\w\.-]+@[\w\.-]+(\.[\w]+)+)) # match email
(\|)? # match pipe
([\w\.-]+@[\w\.-]+(\.[\w]+)+)?> # match email
"""
SLACK_USERMENTION_REGEX = r"""
(<@) # Start with '<@'
([a-zA-Z0-9]+) # Here we have the Slack id
    (\|)? # A vertical line is not always present in the mention
    ([a-zA-Z0-9]+)? # If a vertical line is present, this is the short name
(>) # ends with '>'
"""
# Slack doesn't have mid-word message-formatting like Zulip.
# Hence, ~stri~ke doesn't format the word in Slack, but ~~stri~~ke
# formats the word in Zulip
SLACK_STRIKETHROUGH_REGEX = r"""
(^|[ -(]|[+-/]|\*|\_|[:-?]|\{|\[|\||\^) # Start after specified characters
    (\~) # followed by a tilde
    ([ -)+-}—]*)([ -}]+) # any character except a tilde
    (\~) # followed by a tilde
($|[ -']|[+-/]|[:-?]|\*|\_|\}|\)|\]|\||\^) # ends with specified characters
"""
SLACK_ITALIC_REGEX = r"""
(^|[ -*]|[+-/]|[:-?]|\{|\[|\||\^|~)
(\_)
    ([ -^`~—]*)([ -^`-~]+) # any character except an underscore
(\_)
($|[ -']|[+-/]|[:-?]|\}|\)|\]|\*|\||\^|~)
"""
SLACK_BOLD_REGEX = r"""
(^|[ -(]|[+-/]|[:-?]|\{|\[|\_|\||\^|~)
(\*)
    ([ -)+-~—]*)([ -)+-~]+) # any character except an asterisk
(\*)
($|[ -']|[+-/]|[:-?]|\}|\)|\]|\_|\||\^|~)
"""
def get_user_full_name(user: ZerverFieldsT) -> str:
if user['deleted'] is False:
if user['real_name'] == '':
return user['name']
else:
return user['real_name']
else:
return user['name']
# Markdown mapping
def convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],
added_channels: AddedChannelsT,
added_users: AddedUsersT) -> Tuple[str, List[int], bool]:
mentioned_users_id = []
text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, "**")
text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, "~~")
text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, "*")
    # Map Slack's "mention all" syntaxes '<!everyone>', '<!channel>', and
    # '<!here>' to Zulip's '@**all**'.  No regex is needed here, since these
    # can appear anywhere in the sentence.
text = text.replace('<!everyone>', '@**all**')
text = text.replace('<!channel>', '@**all**')
text = text.replace('<!here>', '@**all**')
# Map Slack channel mention: '<#C5Z73A7RA|general>' to '#**general**'
for cname, ids in added_channels.items():
cid = ids[0]
text = text.replace('<#%s|%s>' % (cid, cname), '#**' + cname + '**')
tokens = text.split(' ')
    for i, token in enumerate(tokens):
        # Check user mentions and change mention format from
        # '<@slack_id|short_name>' to '@**full_name**'
        if re.findall(SLACK_USERMENTION_REGEX, token, re.VERBOSE):
            tokens[i], user_id = get_user_mentions(token,
                                                   users, added_users)
if user_id is not None:
mentioned_users_id.append(user_id)
text = ' '.join(tokens)
# Check and convert link format
text, has_link = convert_link_format(text)
# convert `<mailto:foo@foo.com>` to `mailto:foo@foo.com`
text, has_mailto_link = convert_mailto_format(text)
    message_has_link = has_link or has_mailto_link
return text, mentioned_users_id, message_has_link
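# A minimal usage sketch (hypothetical input; not part of the original module):
#     convert_to_zulip_markdown('Hello <!channel>', [], {}, {})
#     -> ('Hello @**all**', [], False)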
def get_user_mentions(token: str, users: List[ZerverFieldsT],
added_users: AddedUsersT) -> Tuple[str, Optional[int]]:
slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)
short_name = slack_usermention_match.group(4) # type: ignore # slack_usermention_match exists and is not None
slack_id = slack_usermention_match.group(2) # type: ignore # slack_usermention_match exists and is not None
for user in users:
if (user['id'] == slack_id and user['name'] == short_name and short_name) or \
(user['id'] == slack_id and short_name is None):
full_name = get_user_full_name(user)
user_id = added_users[slack_id]
mention = "@**" + full_name + "**"
token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)
return token, user_id
return token, None
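# Illustrative example (the Slack id and user data below are hypothetical):
#     users = [{'id': 'U08RGD1RD', 'name': 'john', 'deleted': False,
#               'real_name': 'John Doe'}]
#     get_user_mentions('<@U08RGD1RD|john>', users, {'U08RGD1RD': 7})
#     -> ('@**John Doe**', 7)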
# Map italic, bold and strikethrough markdown
def convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:
"""
Returns:
1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'
2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'
3. For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'
"""
for match in re.finditer(regex, text, re.VERBOSE):
converted_token = (match.group(1) + zulip_keyword + match.group(3)
+ match.group(4) + zulip_keyword + match.group(6))
text = text.replace(match.group(0), converted_token)
return text
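# For example (hypothetical input):
#     convert_markdown_syntax('this is *bold*', SLACK_BOLD_REGEX, '**')
#     -> 'this is **bold**'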
def convert_link_format(text: str) -> Tuple[str, bool]:
"""
1. Converts '<https://foo.com>' to 'https://foo.com'
2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'
"""
has_link = False
for match in re.finditer(LINK_REGEX, text, re.VERBOSE):
converted_text = match.group(0).replace('>', '').replace('<', '')
has_link = True
text = text.replace(match.group(0), converted_text)
return text, has_link
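# For example (hypothetical input):
#     convert_link_format('see <https://foo.com>')
#     -> ('see https://foo.com', True)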
def convert_mailto_format(text: str) -> Tuple[str, bool]:
"""
1. Converts '<mailto:foo@foo.com>' to 'mailto:foo@foo.com'
2. Converts '<mailto:foo@foo.com|foo@foo.com>' to 'mailto:foo@foo.com'
"""
has_link = False
for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):
has_link = True
text = text.replace(match.group(0), match.group(1))
return text, has_link
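# For example (hypothetical input):
#     convert_mailto_format('<mailto:foo@foo.com|foo@foo.com>')
#     -> ('mailto:foo@foo.com', True)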
| [
"ZerverFieldsT",
"str",
"List[ZerverFieldsT]",
"AddedChannelsT",
"AddedUsersT",
"str",
"List[ZerverFieldsT]",
"AddedUsersT",
"str",
"str",
"str",
"str",
"str"
] | [
3046,
3315,
3327,
3394,
3453,
5295,
5307,
5363,
6272,
6284,
6304,
6910,
7372
] | [
3059,
3318,
3346,
3408,
3464,
5298,
5326,
5374,
6275,
6287,
6307,
6913,
7375
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/decorator.py |
import django_otp
from two_factor.utils import default_device
from django_otp import user_has_device, _user_is_authenticated
from django_otp.conf import settings as otp_settings
from django.contrib.auth.decorators import user_passes_test as django_user_passes_test
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import REDIRECT_FIELD_NAME, login as django_login
from django.views.decorators.csrf import csrf_exempt
from django.http import QueryDict, HttpResponseNotAllowed, HttpRequest
from django.http.multipartparser import MultiPartParser
from zerver.models import Realm, UserProfile, get_client, get_user_profile_by_api_key
from zerver.lib.response import json_error, json_unauthorized, json_success
from django.shortcuts import resolve_url
from django.utils.decorators import available_attrs
from django.utils.timezone import now as timezone_now
from django.conf import settings
from zerver.lib.queue import queue_json_publish
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.utils import statsd, is_remote_server
from zerver.lib.exceptions import RateLimited, JsonableError, ErrorCode, \
InvalidJSONError
from zerver.lib.types import ViewFuncT
from zerver.lib.rate_limiter import incr_ratelimit, is_ratelimited, \
api_calls_left, RateLimitedUser
from zerver.lib.request import REQ, has_request_variables, JsonableError, RequestVariableMissingError
from django.core.handlers import base
from functools import wraps
import base64
import datetime
import ujson
import logging
from io import BytesIO
import urllib
from typing import Union, Any, Callable, Sequence, Dict, Optional, TypeVar, Tuple, cast
from zerver.lib.logging_util import log_to_file
# This is a hack to ensure that RemoteZulipServer always exists even
# if Zilencer isn't enabled.
if settings.ZILENCER_ENABLED:
from zilencer.models import get_remote_server_by_uuid, RemoteZulipServer
else: # nocoverage # Hack here basically to make impossible code paths compile
from mock import Mock
get_remote_server_by_uuid = Mock()
RemoteZulipServer = Mock() # type: ignore # https://github.com/JukkaL/mypy/issues/1188
ReturnT = TypeVar('ReturnT')
webhook_logger = logging.getLogger("zulip.zerver.webhooks")
log_to_file(webhook_logger, settings.API_KEY_ONLY_WEBHOOK_LOG_PATH)
class _RespondAsynchronously:
pass
# Return RespondAsynchronously from an @asynchronous view if the
# response will be provided later by calling handler.zulip_finish(),
# or has already been provided this way. We use this for longpolling
# mode.
RespondAsynchronously = _RespondAsynchronously()
AsyncWrapperT = Callable[..., Union[HttpResponse, _RespondAsynchronously]]
def asynchronous(method: Callable[..., Union[HttpResponse, _RespondAsynchronously]]) -> AsyncWrapperT:
# TODO: this should be the correct annotation when mypy gets fixed: type:
# (Callable[[HttpRequest, base.BaseHandler, Sequence[Any], Dict[str, Any]],
# Union[HttpResponse, _RespondAsynchronously]]) ->
# Callable[[HttpRequest, Sequence[Any], Dict[str, Any]], Union[HttpResponse, _RespondAsynchronously]]
# TODO: see https://github.com/python/mypy/issues/1655
@wraps(method)
def wrapper(request: HttpRequest, *args: Any,
**kwargs: Any) -> Union[HttpResponse, _RespondAsynchronously]:
return method(request, handler=request._tornado_handler, *args, **kwargs)
if getattr(method, 'csrf_exempt', False): # nocoverage # Our one @asynchronous route requires CSRF
wrapper.csrf_exempt = True # type: ignore # https://github.com/JukkaL/mypy/issues/1170
return wrapper
def cachify(method: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
dct = {} # type: Dict[Tuple[Any, ...], ReturnT]
def cache_wrapper(*args: Any) -> ReturnT:
tup = tuple(args)
if tup in dct:
return dct[tup]
result = method(*args)
dct[tup] = result
return result
return cache_wrapper
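# A minimal usage sketch (hypothetical function, not from this codebase):
#     @cachify
#     def fib(n: int) -> int:
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
# Repeated calls with the same positional arguments return the cached result;
# note that this memoizer keys only on positional args and supports no kwargs.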
def update_user_activity(request: HttpRequest, user_profile: UserProfile,
query: Optional[str]) -> None:
# update_active_status also pushes to rabbitmq, and it seems
# redundant to log that here as well.
if request.META["PATH_INFO"] == '/json/users/me/presence':
return
if query is not None:
pass
elif hasattr(request, '_query'):
query = request._query
else:
query = request.META['PATH_INFO']
event = {'query': query,
'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(timezone_now()),
'client': request.client.name}
queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func: ViewFuncT) -> ViewFuncT:
@wraps(func)
def wrapper(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if (request.method != "POST" and
not (request.method == "SOCKET" and
request.META['zulip.emulated_method'] == "POST")):
if request.method == "SOCKET": # nocoverage # zulip.emulated_method is always POST
err_method = "SOCKET/%s" % (request.META['zulip.emulated_method'],)
else:
err_method = request.method
logging.warning('Method Not Allowed (%s): %s', err_method, request.path,
extra={'status_code': 405, 'request': request})
return HttpResponseNotAllowed(["POST"])
return func(request, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
def require_realm_admin(func: ViewFuncT) -> ViewFuncT:
@wraps(func)
def wrapper(request: HttpRequest, user_profile: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:
if not user_profile.is_realm_admin:
raise JsonableError(_("Must be an organization administrator"))
return func(request, user_profile, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
def require_billing_access(func: ViewFuncT) -> ViewFuncT:
@wraps(func)
def wrapper(request: HttpRequest, user_profile: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:
if not user_profile.is_realm_admin and not user_profile.is_billing_admin:
raise JsonableError(_("Must be a billing administrator or an organization administrator"))
return func(request, user_profile, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
from zerver.lib.user_agent import parse_user_agent
def get_client_name(request: HttpRequest, is_browser_view: bool) -> str:
# If the API request specified a client in the request content,
# that has priority. Otherwise, extract the client from the
# User-Agent.
if 'client' in request.GET:
return request.GET['client']
if 'client' in request.POST:
return request.POST['client']
if "HTTP_USER_AGENT" in request.META:
user_agent = parse_user_agent(request.META["HTTP_USER_AGENT"]) # type: Optional[Dict[str, str]]
else:
user_agent = None
if user_agent is not None:
# We could check for a browser's name being "Mozilla", but
# e.g. Opera and MobileSafari don't set that, and it seems
# more robust to just key off whether it was a browser view
if is_browser_view and not user_agent["name"].startswith("Zulip"):
# Avoid changing the client string for browsers, but let
# the Zulip desktop and mobile apps be themselves.
return "website"
else:
return user_agent["name"]
else:
# In the future, we will require setting USER_AGENT, but for
# now we just want to tag these requests so we can review them
# in logs and figure out the extent of the problem
if is_browser_view:
return "website"
else:
return "Unspecified"
def process_client(request: HttpRequest, user_profile: UserProfile,
*, is_browser_view: bool=False,
client_name: Optional[str]=None,
remote_server_request: bool=False,
query: Optional[str]=None) -> None:
if client_name is None:
client_name = get_client_name(request, is_browser_view)
request.client = get_client(client_name)
if not remote_server_request:
update_user_activity(request, user_profile, query)
class InvalidZulipServerError(JsonableError):
code = ErrorCode.INVALID_ZULIP_SERVER
data_fields = ['role']
def __init__(self, role: str) -> None:
self.role = role # type: str
@staticmethod
def msg_format() -> str:
return "Zulip server auth failure: {role} is not registered"
class InvalidZulipServerKeyError(InvalidZulipServerError):
@staticmethod
def msg_format() -> str:
return "Zulip server auth failure: key does not match role {role}"
def validate_api_key(request: HttpRequest, role: Optional[str],
api_key: str, is_webhook: bool=False,
client_name: Optional[str]=None) -> Union[UserProfile, RemoteZulipServer]:
# Remove whitespace to protect users from trivial errors.
api_key = api_key.strip()
if role is not None:
role = role.strip()
if settings.ZILENCER_ENABLED and role is not None and is_remote_server(role):
try:
remote_server = get_remote_server_by_uuid(role)
except RemoteZulipServer.DoesNotExist:
raise InvalidZulipServerError(role)
if api_key != remote_server.api_key:
raise InvalidZulipServerKeyError(role)
if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
raise JsonableError(_("Invalid subdomain for push notifications bouncer"))
request.user = remote_server
request._email = "zulip-server:" + role
remote_server.rate_limits = ""
process_client(request, remote_server, remote_server_request=True)
return remote_server
user_profile = access_user_by_api_key(request, api_key, email=role)
if user_profile.is_incoming_webhook and not is_webhook:
raise JsonableError(_("This API is not available to incoming webhook bots."))
request.user = user_profile
request._email = user_profile.email
process_client(request, user_profile, client_name=client_name)
return user_profile
def validate_account_and_subdomain(request: HttpRequest, user_profile: UserProfile) -> None:
if user_profile.realm.deactivated:
raise JsonableError(_("This organization has been deactivated"))
if not user_profile.is_active:
raise JsonableError(_("Account is deactivated"))
# Either the subdomain matches, or processing a websockets message
# in the message_sender worker (which will have already had the
# subdomain validated), or we're accessing Tornado from and to
# localhost (aka spoofing a request as the user).
if (not user_matches_subdomain(get_subdomain(request), user_profile) and
not (request.method == "SOCKET" and
request.META['SERVER_NAME'] == "127.0.0.1") and
not (settings.RUNNING_INSIDE_TORNADO and
request.META["SERVER_NAME"] == "127.0.0.1" and
request.META["REMOTE_ADDR"] == "127.0.0.1")):
logging.warning("User %s (%s) attempted to access API on wrong subdomain (%s)" % (
user_profile.email, user_profile.realm.subdomain, get_subdomain(request)))
raise JsonableError(_("Account is not associated with this subdomain"))
def access_user_by_api_key(request: HttpRequest, api_key: str, email: Optional[str]=None) -> UserProfile:
try:
user_profile = get_user_profile_by_api_key(api_key)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid API key"))
if email is not None and email.lower() != user_profile.email.lower():
# This covers the case that the API key is correct, but for a
# different user. We may end up wanting to relaxing this
# constraint or give a different error message in the future.
raise JsonableError(_("Invalid API key"))
validate_account_and_subdomain(request, user_profile)
return user_profile
def log_exception_to_webhook_logger(request: HttpRequest, user_profile: UserProfile,
request_body: Optional[str]=None) -> None:
if request_body is not None:
payload = request_body
else:
payload = request.body
if request.content_type == 'application/json':
try:
payload = ujson.dumps(ujson.loads(payload), indent=4)
except ValueError:
request_body = str(payload)
else:
request_body = str(payload)
custom_header_template = "{header}: {value}\n"
header_text = ""
for header in request.META.keys():
if header.lower().startswith('http_x'):
header_text += custom_header_template.format(
header=header, value=request.META[header])
header_message = header_text if header_text else None
message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
custom_http_headers:
{custom_headers}
body:
{body}
""".format(
email=user_profile.email,
realm=user_profile.realm.string_id,
client_name=request.client.name,
body=payload,
path_info=request.META.get('PATH_INFO', None),
content_type=request.content_type,
custom_headers=header_message,
)
message = message.strip(' ')
webhook_logger.exception(message)
def full_webhook_client_name(raw_client_name: Optional[str]=None) -> Optional[str]:
if raw_client_name is None:
return None
return "Zulip{}Webhook".format(raw_client_name)
# Use this for webhook views that don't get an email passed in.
def api_key_only_webhook_view(
webhook_client_name: str,
notify_bot_owner_on_invalid_json: Optional[bool]=True
) -> Callable[[ViewFuncT], ViewFuncT]:
# TODO The typing here could be improved by using the Extended Callable types:
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#extended-callable-types
def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
@csrf_exempt
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request: HttpRequest, api_key: str=REQ(),
*args: Any, **kwargs: Any) -> HttpResponse:
user_profile = validate_api_key(request, None, api_key, is_webhook=True,
client_name=full_webhook_client_name(webhook_client_name))
if settings.RATE_LIMITING:
rate_limit_user(request, user_profile, domain='all')
try:
return view_func(request, user_profile, *args, **kwargs)
except InvalidJSONError as e:
if not notify_bot_owner_on_invalid_json:
raise e
# NOTE: importing this at the top of file leads to a
# cyclic import; correct fix is probably to move
# notify_bot_owner_about_invalid_json to a smaller file.
from zerver.lib.webhooks.common import notify_bot_owner_about_invalid_json
notify_bot_owner_about_invalid_json(user_profile, webhook_client_name)
except Exception as err:
log_exception_to_webhook_logger(request, user_profile)
raise err
return _wrapped_func_arguments
return _wrapped_view_func
# From Django 1.8, modified to leave off ?next=/
def redirect_to_login(next: str, login_url: Optional[str]=None,
redirect_field_name: str=REDIRECT_FIELD_NAME) -> HttpResponseRedirect:
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urllib.parse.urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
# Don't add ?next=/, to keep our URLs clean
if next != '/':
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urllib.parse.urlunparse(login_url_parts))
# From Django 1.8
def user_passes_test(test_func: Callable[[HttpResponse], bool], login_url: Optional[str]=None,
redirect_field_name: str=REDIRECT_FIELD_NAME) -> Callable[[ViewFuncT], ViewFuncT]:
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if test_func(request):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view # type: ignore # https://github.com/python/mypy/issues/1927
return decorator
def logged_in_and_active(request: HttpRequest) -> bool:
if not request.user.is_authenticated:
return False
if not request.user.is_active:
return False
if request.user.realm.deactivated:
return False
return user_matches_subdomain(get_subdomain(request), request.user)
def do_two_factor_login(request: HttpRequest, user_profile: UserProfile) -> None:
device = default_device(user_profile)
if device:
django_otp.login(request, device)
def do_login(request: HttpRequest, user_profile: UserProfile) -> None:
"""Creates a session, logging in the user, using the Django method,
and also adds helpful data needed by our server logs.
"""
django_login(request, user_profile)
request._email = user_profile.email
process_client(request, user_profile, is_browser_view=True)
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
# Login with two factor authentication as well.
do_two_factor_login(request, user_profile)
def log_view_func(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
request._query = view_func.__name__
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def add_logging_data(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
request._email = request.user.email
process_client(request, request.user, is_browser_view=True,
query=view_func.__name__)
return rate_limit()(view_func)(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def human_users_only(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if request.user.is_bot:
return json_error(_("This endpoint does not accept bot requests."))
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
# Based on Django 1.8's @login_required
def zulip_login_required(
function: Optional[ViewFuncT]=None,
redirect_field_name: str=REDIRECT_FIELD_NAME,
login_url: str=settings.HOME_NOT_LOGGED_IN,
) -> Union[Callable[[ViewFuncT], ViewFuncT], ViewFuncT]:
actual_decorator = user_passes_test(
logged_in_and_active,
login_url=login_url,
redirect_field_name=redirect_field_name
)
otp_required_decorator = zulip_otp_required(
redirect_field_name=redirect_field_name,
login_url=login_url
)
if function:
# Add necessary logging data via add_logging_data
return actual_decorator(zulip_otp_required(add_logging_data(function)))
return actual_decorator(otp_required_decorator) # nocoverage # We don't use this without a function
def require_server_admin(view_func: ViewFuncT) -> ViewFuncT:
@zulip_login_required
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if not request.user.is_staff:
return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)
return add_logging_data(view_func)(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def require_server_admin_api(view_func: ViewFuncT) -> ViewFuncT:
@zulip_login_required
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, user_profile: UserProfile, *args: Any,
**kwargs: Any) -> HttpResponse:
if not user_profile.is_staff:
raise JsonableError(_("Must be an server administrator"))
return view_func(request, user_profile, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def require_non_guest_user(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, user_profile: UserProfile, *args: Any,
**kwargs: Any) -> HttpResponse:
if user_profile.is_guest:
raise JsonableError(_("Not allowed for guest users"))
return view_func(request, user_profile, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def require_non_guest_human_user(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, user_profile: UserProfile, *args: Any,
**kwargs: Any) -> HttpResponse:
if user_profile.is_guest:
raise JsonableError(_("Not allowed for guest users"))
if user_profile.is_bot:
return json_error(_("This endpoint does not accept bot requests."))
return view_func(request, user_profile, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
# authenticated_api_view will add the authenticated user's
# user_profile to the view function's arguments list, since we have to
# look it up anyway. It is deprecated in favor of the REST API
# versions.
def authenticated_api_view(is_webhook: bool=False) -> Callable[[ViewFuncT], ViewFuncT]:
def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
@csrf_exempt
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request: HttpRequest, email: str=REQ(),
api_key: Optional[str]=REQ(default=None),
api_key_legacy: Optional[str]=REQ('api-key', default=None),
*args: Any, **kwargs: Any) -> HttpResponse:
if api_key is None:
api_key = api_key_legacy
if api_key is None: # nocoverage # We're removing this whole decorator soon.
raise RequestVariableMissingError("api_key")
user_profile = validate_api_key(request, email, api_key, is_webhook)
# Apply rate limiting
limited_func = rate_limit()(view_func)
try:
return limited_func(request, user_profile, *args, **kwargs)
except Exception as err:
if is_webhook:
# In this case, request_body is passed explicitly because the body
# of the request has already been read in has_request_variables and
# can't be read/accessed more than once, so we just access it from
# the request.POST QueryDict.
log_exception_to_webhook_logger(request, user_profile,
request_body=request.POST.get('payload'))
raise err
return _wrapped_func_arguments
return _wrapped_view_func
# This API endpoint is used only for the mobile apps. It is part of a
# workaround for the fact that React Native doesn't support setting
# HTTP basic authentication headers.
def authenticated_uploads_api_view() -> Callable[[ViewFuncT], ViewFuncT]:
def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
@csrf_exempt
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request: HttpRequest,
api_key: str=REQ(),
*args: Any, **kwargs: Any) -> HttpResponse:
user_profile = validate_api_key(request, None, api_key, False)
limited_func = rate_limit()(view_func)
return limited_func(request, user_profile, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP Basic
# authentication.
#
# If webhook_client_name is specified, the request is a webhook view
# with that string as the basis for the client string.
def authenticated_rest_api_view(*, webhook_client_name: Optional[str]=None,
is_webhook: bool=False) -> Callable[[ViewFuncT], ViewFuncT]:
def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
@csrf_exempt
@wraps(view_func)
def _wrapped_func_arguments(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
# First try block attempts to get the credentials we need to do authentication
try:
# Grab the base64-encoded authentication string, decode it, and split it into
# the email and API key
auth_type, credentials = request.META['HTTP_AUTHORIZATION'].split()
# case insensitive per RFC 1945
if auth_type.lower() != "basic":
return json_error(_("This endpoint requires HTTP basic authentication."))
role, api_key = base64.b64decode(credentials).decode('utf-8').split(":")
except ValueError:
return json_unauthorized(_("Invalid authorization header for basic auth"))
except KeyError:
return json_unauthorized(_("Missing authorization header for basic auth"))
# Now we try to do authentication or die
try:
# profile is a Union[UserProfile, RemoteZulipServer]
profile = validate_api_key(request, role, api_key,
is_webhook=is_webhook or webhook_client_name is not None,
client_name=full_webhook_client_name(webhook_client_name))
except JsonableError as e:
return json_unauthorized(e.msg)
try:
# Apply rate limiting
return rate_limit()(view_func)(request, profile, *args, **kwargs)
except Exception as err:
if is_webhook or webhook_client_name is not None:
request_body = request.POST.get('payload')
if request_body is not None:
log_exception_to_webhook_logger(request, profile,
request_body=request_body)
raise err
return _wrapped_func_arguments
return _wrapped_view_func
def process_as_post(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
# Adapted from django/http/__init__.py.
# So by default Django doesn't populate request.POST for anything besides
# POST requests. We want this dict populated for PATCH/PUT, so we have to
# do it ourselves.
#
        # This will not be required in the future; a bug will be filed
        # against Django upstream.
if not request.POST:
# Only take action if POST is empty.
if request.META.get('CONTENT_TYPE', '').startswith('multipart'):
# Note that request._files is just the private attribute that backs the
# FILES property, so we are essentially setting request.FILES here. (In
# Django 1.5 FILES was still a read-only property.)
request.POST, request._files = MultiPartParser(
request.META,
BytesIO(request.body),
request.upload_handlers,
request.encoding
).parse()
else:
request.POST = QueryDict(request.body, encoding=request.encoding)
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def authenticate_log_and_execute_json(request: HttpRequest,
view_func: ViewFuncT,
*args: Any, **kwargs: Any) -> HttpResponse:
if not request.user.is_authenticated:
return json_error(_("Not logged in"), status=401)
user_profile = request.user
validate_account_and_subdomain(request, user_profile)
if user_profile.is_incoming_webhook:
raise JsonableError(_("Webhook bots can only access webhooks"))
process_client(request, user_profile, is_browser_view=True,
query=view_func.__name__)
request._email = user_profile.email
return rate_limit()(view_func)(request, user_profile, *args, **kwargs)
# Checks if the request is a POST request and that the user is logged
# in. If not, return an error (the @login_required behavior of
# redirecting to a login page doesn't make sense for json views)
def authenticated_json_post_view(view_func: ViewFuncT) -> ViewFuncT:
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest,
*args: Any, **kwargs: Any) -> HttpResponse:
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def authenticated_json_view(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest,
*args: Any, **kwargs: Any) -> HttpResponse:
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def is_local_addr(addr: str) -> bool:
return addr in ('127.0.0.1', '::1')
# These views are used by the main Django server to notify the Tornado server
# of events. We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request: HttpRequest) -> bool:
return (is_local_addr(request.META['REMOTE_ADDR']) and
request.POST.get('secret') == settings.SHARED_SECRET)
def client_is_exempt_from_rate_limiting(request: HttpRequest) -> bool:
# Don't rate limit requests from Django that come from our own servers,
# and don't rate-limit dev instances
return ((request.client and request.client.name.lower() == 'internal') and
(is_local_addr(request.META['REMOTE_ADDR']) or
settings.DEBUG_RATE_LIMITING))
def internal_notify_view(is_tornado_view: bool) -> Callable[[ViewFuncT], ViewFuncT]:
# The typing here could be improved by using the Extended Callable types:
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#extended-callable-types
"""Used for situations where something running on the Zulip server
needs to make a request to the (other) Django/Tornado processes running on
the server."""
def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
@csrf_exempt
@require_post
@wraps(view_func)
def _wrapped_func_arguments(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if not authenticate_notify(request):
return json_error(_('Access denied'), status=403)
is_tornado_request = hasattr(request, '_tornado_handler')
# These next 2 are not security checks; they are internal
# assertions to help us find bugs.
if is_tornado_view and not is_tornado_request:
raise RuntimeError('Tornado notify view called with no Tornado handler')
if not is_tornado_view and is_tornado_request:
raise RuntimeError('Django notify view called with Tornado handler')
request._email = "internal"
return view_func(request, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
# Converter functions for use with has_request_variables
def to_non_negative_int(s: str) -> int:
x = int(s)
if x < 0:
raise ValueError("argument is negative")
return x
def to_not_negative_int_or_none(s: str) -> Optional[int]:
if s:
return to_non_negative_int(s)
return None
def to_utc_datetime(timestamp: str) -> datetime.datetime:
return timestamp_to_datetime(float(timestamp))
def statsd_increment(counter: str, val: int=1,
) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]:
"""Increments a statsd counter on completion of the
decorated function.
Pass the name of the counter to this decorator-returning function."""
def wrapper(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
@wraps(func)
def wrapped_func(*args: Any, **kwargs: Any) -> ReturnT:
ret = func(*args, **kwargs)
statsd.incr(counter, val)
return ret
return wrapped_func
return wrapper
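# A minimal usage sketch (the counter name is hypothetical):
#     @statsd_increment('example.work_done')
#     def do_work() -> None:
#         ...
# Each successful call to do_work() increments 'example.work_done' by `val`.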
def rate_limit_user(request: HttpRequest, user: UserProfile, domain: str) -> None:
"""Returns whether or not a user was rate limited. Will raise a RateLimited exception
if the user has been rate limited, otherwise returns and modifies request to contain
the rate limit information"""
entity = RateLimitedUser(user, domain=domain)
ratelimited, time = is_ratelimited(entity)
request._ratelimit_applied_limits = True
request._ratelimit_secs_to_freedom = time
request._ratelimit_over_limit = ratelimited
# Abort this request if the user is over their rate limits
if ratelimited:
statsd.incr("ratelimiter.limited.%s.%s" % (type(user), user.id))
raise RateLimited()
incr_ratelimit(entity)
calls_remaining, time_reset = api_calls_left(entity)
request._ratelimit_remaining = calls_remaining
request._ratelimit_secs_to_freedom = time_reset
def rate_limit(domain: str='all') -> Callable[[ViewFuncT], ViewFuncT]:
"""Rate-limits a view. Takes an optional 'domain' param if you wish to
rate limit different types of API calls independently.
Returns a decorator"""
def wrapper(func: ViewFuncT) -> ViewFuncT:
@wraps(func)
def wrapped_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
# It is really tempting to not even wrap our original function
# when settings.RATE_LIMITING is False, but it would make
# for awkward unit testing in some situations.
if not settings.RATE_LIMITING:
return func(request, *args, **kwargs)
if client_is_exempt_from_rate_limiting(request):
return func(request, *args, **kwargs)
try:
user = request.user
except Exception: # nocoverage # See comments below
# TODO: This logic is not tested, and I'm not sure we are
# doing the right thing here.
user = None
if not user: # nocoverage # See comments below
logging.error("Requested rate-limiting on %s but user is not authenticated!" %
func.__name__)
return func(request, *args, **kwargs)
# Rate-limiting data is stored in redis
# We also only support rate-limiting authenticated
# views right now.
# TODO(leo) - implement per-IP non-authed rate limiting
rate_limit_user(request, user, domain)
return func(request, *args, **kwargs)
return wrapped_func # type: ignore # https://github.com/python/mypy/issues/1927
return wrapper
def return_success_on_head_request(view_func: ViewFuncT) -> ViewFuncT:
@wraps(view_func)
def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if request.method == 'HEAD':
return json_success()
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def zulip_otp_required(view: Any=None,
redirect_field_name: str='next',
login_url: str=settings.HOME_NOT_LOGGED_IN,
) -> Callable[..., HttpResponse]:
"""
The reason we need to create this function is that the stock
otp_required decorator doesn't play well with tests. We cannot
    enable/disable the if_configured parameter during tests, since the decorator
retains its value due to closure.
Similar to :func:`~django.contrib.auth.decorators.login_required`, but
requires the user to be :term:`verified`. By default, this redirects users
to :setting:`OTP_LOGIN_URL`.
"""
def test(user: UserProfile) -> bool:
"""
:if_configured: If ``True``, an authenticated user with no confirmed
OTP devices will be allowed. Default is ``False``. If ``False``,
2FA will not do any authentication.
"""
if_configured = settings.TWO_FACTOR_AUTHENTICATION_ENABLED
if not if_configured:
return True
return user.is_verified() or (_user_is_authenticated(user)
and not user_has_device(user))
decorator = django_user_passes_test(test,
login_url=login_url,
redirect_field_name=redirect_field_name)
return decorator if (view is None) else decorator(view)
| [
"Callable[..., Union[HttpResponse, _RespondAsynchronously]]",
"HttpRequest",
"Any",
"Any",
"Callable[..., ReturnT]",
"Any",
"HttpRequest",
"UserProfile",
"Optional[str]",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"UserProfile",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"UserProfile",
"Any",
"Any",
"HttpRequest",
"bool",
"HttpRequest",
"UserProfile",
"str",
"HttpRequest",
"Optional[str]",
"str",
"HttpRequest",
"UserProfile",
"HttpRequest",
"str",
"HttpRequest",
"UserProfile",
"str",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"str",
"Callable[[HttpResponse], bool]",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"HttpRequest",
"HttpRequest",
"UserProfile",
"HttpRequest",
"UserProfile",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"UserProfile",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"UserProfile",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"UserProfile",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"HttpRequest",
"ViewFuncT",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"str",
"HttpRequest",
"HttpRequest",
"bool",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"str",
"str",
"str",
"str",
"Callable[..., ReturnT]",
"Any",
"Any",
"HttpRequest",
"UserProfile",
"str",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"ViewFuncT",
"HttpRequest",
"Any",
"Any",
"UserProfile"
] | [
2874,
3382,
3402,
3433,
3808,
3942,
4175,
4202,
4247,
4951,
5018,
5038,
5053,
5844,
5911,
5938,
5958,
5973,
6289,
6356,
6383,
6403,
6418,
6847,
6877,
8228,
8255,
8857,
9239,
9258,
9303,
10735,
10762,
11894,
11916,
12580,
12607,
14233,
14549,
14697,
14773,
14788,
15991,
16746,
17178,
17301,
17321,
17336,
18307,
18614,
18641,
18785,
18812,
19304,
19387,
19407,
19422,
19663,
19746,
19766,
19781,
20153,
20236,
20256,
20271,
21406,
21515,
21535,
21550,
21881,
21990,
22017,
22037,
22079,
22404,
22487,
22514,
22534,
22576,
22899,
22982,
23009,
23029,
23071,
23794,
23964,
24212,
24227,
25666,
25814,
25926,
25941,
26656,
26773,
26793,
26808,
28811,
28894,
28914,
28929,
30248,
30310,
30366,
30381,
31176,
31304,
31351,
31366,
31605,
31688,
31735,
31750,
31974,
32263,
32460,
32825,
33241,
33380,
33400,
33415,
34280,
34421,
34541,
34650,
34930,
35034,
35049,
35244,
35263,
35284,
36377,
36457,
36477,
36492,
37926,
38009,
38029,
38044,
38969
] | [
2932,
3393,
3405,
3436,
3830,
3945,
4186,
4213,
4260,
4960,
5029,
5041,
5056,
5853,
5922,
5949,
5961,
5976,
6298,
6367,
6394,
6406,
6421,
6858,
6881,
8239,
8266,
8860,
9250,
9271,
9306,
10746,
10773,
11905,
11919,
12591,
12618,
14236,
14558,
14708,
14776,
14791,
15994,
16776,
17187,
17312,
17324,
17339,
18318,
18625,
18652,
18796,
18823,
19313,
19398,
19410,
19425,
19672,
19757,
19769,
19784,
20162,
20247,
20259,
20274,
21415,
21526,
21538,
21553,
21890,
22001,
22028,
22040,
22082,
22413,
22498,
22525,
22537,
22579,
22908,
22993,
23020,
23032,
23074,
23803,
23975,
24215,
24230,
25675,
25825,
25929,
25944,
26665,
26784,
26796,
26811,
28820,
28905,
28917,
28932,
30259,
30319,
30369,
30384,
31185,
31315,
31354,
31369,
31614,
31699,
31738,
31753,
31977,
32274,
32471,
32829,
33250,
33391,
33403,
33418,
34283,
34424,
34544,
34653,
34952,
35037,
35052,
35255,
35274,
35287,
36386,
36468,
36480,
36495,
37935,
38020,
38032,
38047,
38980
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/filters.py |
import re
from typing import Any, Dict
from django.http import HttpRequest
from django.views.debug import SafeExceptionReporterFilter
class ZulipExceptionReporterFilter(SafeExceptionReporterFilter):
def get_post_parameters(self, request: HttpRequest) -> Dict[str, Any]:
filtered_post = SafeExceptionReporterFilter.get_post_parameters(self, request).copy()
filtered_vars = ['content', 'secret', 'password', 'key', 'api-key', 'subject', 'stream',
'subscriptions', 'to', 'csrfmiddlewaretoken', 'api_key']
for var in filtered_vars:
if var in filtered_post:
filtered_post[var] = '**********'
return filtered_post
def clean_data_from_query_parameters(val: str) -> str:
return re.sub(r"([a-z_-]+=)([^&]+)([&]|$)", r"\1******\3", val)
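# For example (hypothetical query string):
#     clean_data_from_query_parameters('api_key=secret&stream=general')
#     -> 'api_key=******&stream=******'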
| [
"HttpRequest",
"str"
] | [
245,
744
] | [
256,
747
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/forms.py |
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \
PasswordResetForm
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.core.validators import validate_email
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.http import HttpRequest
from jinja2 import Markup as mark_safe
from zerver.lib.actions import do_change_password, email_not_system_bot, \
validate_email_for_realm
from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain
from zerver.lib.request import JsonableError
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain, is_root_domain_available
from zerver.lib.users import check_full_name
from zerver.models import Realm, get_user, UserProfile, get_realm, email_to_domain, \
email_allowed_for_realm, DisposableEmailError, DomainNotAllowedForRealmError, \
EmailContainsPlusError
from zproject.backends import email_auth_enabled, email_belongs_to_ldap
import logging
import re
import DNS
from typing import Any, Callable, List, Optional, Dict
from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm
from two_factor.utils import totp_digits
MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
u'If you want to sign up an alias for Zulip, ' + \
u'<a href="mailto:support@zulipchat.com">contact us</a>.'
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
"organization associated with this subdomain. " + \
"Please contact %s with any questions!" % (FromAddress.SUPPORT,)
def email_is_not_mit_mailing_list(email: str) -> None:
"""Prevent MIT mailing lists from signing up for Zulip"""
if "@mit.edu" in email:
username = email.rsplit("@", 1)[0]
# Check whether the user exists and can get mail.
try:
DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
except DNS.Base.ServerError as e:
if e.rcode == DNS.Status.NXDOMAIN:
raise ValidationError(mark_safe(MIT_VALIDATION_ERROR))
else:
raise AssertionError("Unexpected DNS error")
def check_subdomain_available(subdomain: str, from_management_command: bool=False) -> None:
error_strings = {
'too short': _("Subdomain needs to have length 3 or greater."),
'extremal dash': _("Subdomain cannot start or end with a '-'."),
'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."),
'unavailable': _("Subdomain unavailable. Please choose a different one.")}
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
if is_root_domain_available():
return
raise ValidationError(error_strings['unavailable'])
if subdomain[0] == '-' or subdomain[-1] == '-':
raise ValidationError(error_strings['extremal dash'])
if not re.match('^[a-z0-9-]*$', subdomain):
raise ValidationError(error_strings['bad character'])
if from_management_command:
return
if len(subdomain) < 3:
raise ValidationError(error_strings['too short'])
if is_reserved_subdomain(subdomain) or \
get_realm(subdomain) is not None:
raise ValidationError(error_strings['unavailable'])
class RegistrationForm(forms.Form):
MAX_PASSWORD_LENGTH = 100
full_name = forms.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
# The required-ness of the password field gets overridden if it isn't
# actually required for a realm
password = forms.CharField(widget=forms.PasswordInput, max_length=MAX_PASSWORD_LENGTH)
realm_subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=False)
def __init__(self, *args: Any, **kwargs: Any) -> None:
# Since the superclass doesn't accept random extra kwargs, we
# remove the realm_creation kwarg before initializing.
self.realm_creation = kwargs['realm_creation']
del kwargs['realm_creation']
super().__init__(*args, **kwargs)
if settings.TERMS_OF_SERVICE:
self.fields['terms'] = forms.BooleanField(required=True)
self.fields['realm_name'] = forms.CharField(
max_length=Realm.MAX_REALM_NAME_LENGTH,
required=self.realm_creation)
def clean_full_name(self) -> str:
try:
return check_full_name(self.cleaned_data['full_name'])
except JsonableError as e:
raise ValidationError(e.msg)
def clean_realm_subdomain(self) -> str:
if not self.realm_creation:
# This field is only used when creating a new realm.
return ""
subdomain = self.cleaned_data['realm_subdomain']
if 'realm_in_root_domain' in self.data:
subdomain = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
check_subdomain_available(subdomain)
return subdomain
class ToSForm(forms.Form):
terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
email = forms.EmailField()
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.realm = kwargs.pop('realm', None)
self.from_multiuse_invite = kwargs.pop('from_multiuse_invite', False)
super().__init__(*args, **kwargs)
def clean_email(self) -> str:
"""Returns the email if and only if the user's email address is
allowed to join the realm they are trying to join."""
email = self.cleaned_data['email']
# The user is trying to join a specific realm.
realm = self.realm
from_multiuse_invite = self.from_multiuse_invite
if realm is None:
raise ValidationError(_("The organization you are trying to "
"join using {email} does not "
"exist.").format(email=email))
if not from_multiuse_invite and realm.invite_required:
raise ValidationError(_("Please request an invite for {email} "
"from the organization "
"administrator.").format(email=email))
try:
email_allowed_for_realm(email, realm)
except DomainNotAllowedForRealmError:
raise ValidationError(
_("Your email address, {email}, is not in one of the domains "
"that are allowed to register for accounts in this organization.").format(
string_id=realm.string_id, email=email))
except DisposableEmailError:
raise ValidationError(_("Please use your real email address."))
except EmailContainsPlusError:
raise ValidationError(_("Email addresses containing + are not allowed in this organization."))
validate_email_for_realm(realm, email)
if realm.is_zephyr_mirror_realm:
email_is_not_mit_mailing_list(email)
return email
def email_is_not_disposable(email: str) -> None:
if is_disposable_domain(email_to_domain(email)):
raise ValidationError(_("Please use your real email address."))
class RealmCreationForm(forms.Form):
# This form determines whether users can create a new realm.
email = forms.EmailField(validators=[email_not_system_bot,
email_is_not_disposable])
class LoggingSetPasswordForm(SetPasswordForm):
def save(self, commit: bool=True) -> UserProfile:
do_change_password(self.user, self.cleaned_data['new_password1'],
commit=commit)
return self.user
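# Build an absolute password-reset link for the user by combining the
# realm's URI with Django's password_reset_confirm endpoint.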
def generate_password_reset_url(user_profile: UserProfile,
token_generator: PasswordResetTokenGenerator) -> str:
token = token_generator.make_token(user_profile)
uid = urlsafe_base64_encode(force_bytes(user_profile.id)).decode('ascii')
endpoint = reverse('django.contrib.auth.views.password_reset_confirm',
kwargs=dict(uidb64=uid, token=token))
return "{}{}".format(user_profile.realm.uri, endpoint)
class ZulipPasswordResetForm(PasswordResetForm):
def save(self,
domain_override: Optional[bool]=None,
subject_template_name: str='registration/password_reset_subject.txt',
email_template_name: str='registration/password_reset_email.html',
use_https: bool=False,
token_generator: PasswordResetTokenGenerator=default_token_generator,
from_email: Optional[str]=None,
request: Optional[HttpRequest]=None,
html_email_template_name: Optional[str]=None,
extra_email_context: Optional[Dict[str, Any]]=None
) -> None:
"""
If the email address has an account in the target realm,
generates a one-use only link for resetting password and sends
to the user.
We send a different email if an associated account does not exist in the
database, or an account does exist, but not in the realm.
Note: We ignore protocol and the various email template arguments (those
are an artifact of using Django's password reset framework).
"""
email = self.cleaned_data["email"]
realm = get_realm(get_subdomain(request))
if not email_auth_enabled(realm):
logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
return
if email_belongs_to_ldap(realm, email):
# TODO: Ideally, we'd provide a user-facing error here
# about the fact that they aren't allowed to have a
# password in the Zulip server and should change it in LDAP.
logging.info("Password reset not allowed for user in LDAP domain")
return
if realm.deactivated:
logging.info("Realm is deactivated")
return
user = None # type: Optional[UserProfile]
try:
user = get_user(email, realm)
except UserProfile.DoesNotExist:
pass
context = {
'email': email,
'realm_uri': realm.uri,
}
if user is not None and not user.is_active:
context['user_deactivated'] = True
user = None
if user is not None:
context['active_account_in_realm'] = True
context['reset_url'] = generate_password_reset_url(user, token_generator)
send_email('zerver/emails/password_reset', to_user_id=user.id,
from_name="Zulip Account Security",
from_address=FromAddress.tokenized_no_reply_address(),
context=context)
else:
context['active_account_in_realm'] = False
active_accounts_in_other_realms = UserProfile.objects.filter(email__iexact=email, is_active=True)
if active_accounts_in_other_realms:
context['active_accounts_in_other_realms'] = active_accounts_in_other_realms
send_email('zerver/emails/password_reset', to_email=email,
from_name="Zulip Account Security",
from_address=FromAddress.tokenized_no_reply_address(),
context=context)
class CreateUserForm(forms.Form):
full_name = forms.CharField(max_length=100)
email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
def clean(self) -> Dict[str, Any]:
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
subdomain = get_subdomain(self.request)
realm = get_realm(subdomain)
return_data = {} # type: Dict[str, Any]
self.user_cache = authenticate(self.request, username=username, password=password,
realm=realm, return_data=return_data)
if return_data.get("inactive_realm"):
raise AssertionError("Programming error: inactive realm in authentication form")
if return_data.get("inactive_user") and not return_data.get("is_mirror_dummy"):
# We exclude mirror dummy accounts here. They should be treated as the
# user never having had an account, so we let them fall through to the
# normal invalid_login case below.
error_msg = (
u"Your account is no longer active. "
u"Please contact your organization administrator to reactivate it.")
raise ValidationError(mark_safe(error_msg))
if return_data.get("invalid_subdomain"):
logging.warning("User %s attempted to password login to wrong subdomain %s" %
(username, subdomain))
raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def add_prefix(self, field_name: str) -> str:
"""Disable prefix, since Zulip doesn't use this Django forms feature
(and django-two-factor does use it), and we'd like both to be
happy with this form.
"""
return field_name
class AuthenticationTokenForm(TwoFactorAuthenticationTokenForm):
"""
We add this form to update the widget of otp_token. The default
widget is an input element whose type is a number, which doesn't
stylistically match our theme.
"""
otp_token = forms.IntegerField(label=_("Token"), min_value=1,
max_value=int('9' * totp_digits()),
widget=forms.TextInput)
class MultiEmailField(forms.Field):
def to_python(self, emails: str) -> List[str]:
"""Normalize data to a list of strings."""
if not emails:
return []
return [email.strip() for email in emails.split(',')]
def validate(self, emails: List[str]) -> None:
"""Check if value consists only of valid emails."""
super().validate(emails)
for email in emails:
validate_email(email)
class FindMyTeamForm(forms.Form):
emails = MultiEmailField(
help_text=_("Add up to 10 comma-separated email addresses."))
def clean_emails(self) -> List[str]:
emails = self.cleaned_data['emails']
if len(emails) > 10:
raise forms.ValidationError(_("Please enter at most 10 emails."))
return emails
| [
"str",
"str",
"Any",
"Any",
"Any",
"Any",
"str",
"UserProfile",
"PasswordResetTokenGenerator",
"str",
"str",
"List[str]"
] | [
2321,
2901,
4439,
4454,
5745,
5760,
7651,
8313,
8375,
14004,
14751,
14961
] | [
2324,
2904,
4442,
4457,
5748,
5763,
7654,
8324,
8402,
14007,
14754,
14970
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/__init__.py | [] | [] | [] |
|
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/actions.py | from typing import (
AbstractSet, Any, AnyStr, Callable, Dict, Iterable, List, Mapping, MutableMapping,
Optional, Sequence, Set, Tuple, TypeVar, Union, cast
)
from mypy_extensions import TypedDict
import django.db.utils
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from django.core.files import File
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat, \
RealmCount
from zerver.lib.bugdown import (
version as bugdown_version,
url_embed_preview_enabled_for_realm
)
from zerver.lib.addressee import (
Addressee,
user_profiles_from_unvalidated_emails,
)
from zerver.lib.bot_config import (
ConfigError,
get_bot_config,
get_bot_configs,
set_bot_config,
)
from zerver.lib.cache import (
bot_dict_fields,
delete_user_profile_caches,
to_dict_cache_key_id,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.emoji import emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import StreamDoesNotExistError
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.message import (
access_message,
MessageDict,
render_markdown,
)
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.topic import (
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
ORIG_TOPIC,
LEGACY_PREV_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
)
from zerver.lib.topic_mutes import (
get_topic_mutes,
add_topic_mute,
remove_topic_mute,
)
from zerver.lib.users import (
bulk_get_users,
check_bot_name_available,
check_full_name,
get_api_key,
user_ids_to_users
)
from zerver.lib.user_groups import create_user_group, access_user_group_by_id
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, \
RealmDomain, Service, SubMessage, \
Subscription, Recipient, Message, Attachment, UserMessage, RealmAuditLog, \
UserHotspot, MultiuseInvite, ScheduledMessage, \
Client, DefaultStream, DefaultStreamGroup, UserPresence, PushDeviceToken, \
ScheduledEmail, MAX_TOPIC_NAME_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, get_personal_recipient, get_huddle, \
get_user_profile_by_id, PreregistrationUser, get_display_recipient, \
get_realm, bulk_get_recipients, get_stream_recipient, get_stream_recipients, \
email_allowed_for_realm, email_to_username, display_recipient_cache_key, \
get_user, get_stream_cache_key, active_non_guest_user_ids, \
UserActivityInterval, active_user_ids, get_active_streams, \
realm_filters_for_realm, RealmFilter, stream_name_in_use, \
get_old_unclaimed_attachments, is_cross_realm_bot_email, \
Reaction, EmailChangeStatus, CustomProfileField, \
custom_profile_fields_for_realm, get_huddle_user_ids, \
CustomProfileFieldValue, validate_attachment_request, get_system_bot, \
get_display_recipient_by_id, query_for_ids, get_huddle_recipient, \
UserGroup, UserGroupMembership, get_default_stream_groups, \
get_bot_services, get_bot_dicts_in_realm, DomainNotAllowedForRealmError, \
DisposableEmailError, EmailContainsPlusError
from zerver.lib.alert_words import alert_words_in_realm
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q, Max, Sum
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link, generate_key
from confirmation import settings as confirmation_settings
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
from zerver.lib.create_user import create_user
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, user_profile_cache_key, \
cache_set_many, cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.html_diff import highlight_html_differences
from zerver.lib.i18n import get_language_name
from zerver.lib.alert_words import user_alert_words, add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.notifications import clear_scheduled_emails, \
clear_scheduled_invitation_emails, enqueue_welcome_emails
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.exceptions import JsonableError, ErrorCode, BugdownRenderingException
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.upload import attachment_url_re, attachment_url_to_path_id, \
claim_attachment, delete_message_image, upload_emoji_image, delete_avatar_image
from zerver.lib.str_utils import NonBinaryStr
from zerver.tornado.event_queue import request_event_queue, send_event
from zerver.lib.types import ProfileFieldData
from analytics.models import StreamCount
import ujson
import time
import re
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
from operator import itemgetter
# Used to type-annotate parameters that accepted both str and unicode
# on Python 2, but only str on Python 3.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
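# Palette of colors to assign to new stream subscriptions.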
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node() +
timezone_now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
# return user ids of users who can access the attributes of
# a stream, such as its name/description.
if stream.is_public():
# For a public stream, this is everyone in the realm
# except unsubscribed guest users
return public_stream_user_ids(stream)
else:
# for a private stream, it's subscribers plus realm admins.
return private_stream_user_ids(stream.id) | {user.id for user in stream.realm.get_admin_users()}
def private_stream_user_ids(stream_id: int) -> Set[int]:
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = get_active_subscriptions_for_stream_id(stream_id)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def public_stream_user_ids(stream: Stream) -> Set[int]:
guest_subscriptions = get_active_subscriptions_for_stream_id(
stream.id).filter(user_profile__is_guest=True)
guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
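# Audience for bot lifecycle events: just the owner for bots tied to
# invite-only streams, otherwise all realm admins plus the owner.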
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream and
user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and
user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return {user_profile.bot_owner_id, }
else:
users = {user.id for user in user_profile.realm.get_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def activity_change_requires_seat_update(user: UserProfile) -> bool:
return user.realm.has_seat_based_plan and not user.is_bot
def send_signup_message(sender: UserProfile, admin_realm_signup_notifications_stream: str,
user_profile: UserProfile, internal: bool=False,
realm: Optional[Realm]=None) -> None:
if internal:
# The signup was done via manage.py rather than the web interface.
internal_blurb = " **INTERNAL SIGNUP** "
else:
internal_blurb = " "
user_count = realm_user_count(user_profile.realm)
signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
# Send notification to realm signup notifications stream if it exists
# Don't send notification for the first user in a realm
if signup_notifications_stream is not None and user_count > 1:
internal_send_message(
user_profile.realm,
sender,
"stream",
signup_notifications_stream.name,
"signups",
"%s (%s) just signed up for Zulip. (total: %i)" % (
user_profile.full_name, user_profile.email, user_count
)
)
# We also send a notification to the Zulip administrative realm
admin_realm = get_system_bot(sender).realm
try:
# Check whether the stream exists
get_stream(admin_realm_signup_notifications_stream, admin_realm)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
return
internal_send_message(
admin_realm,
sender,
"stream",
admin_realm_signup_notifications_stream,
user_profile.realm.display_subdomain,
"%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
user_profile.full_name,
user_profile.email,
internal_blurb,
user_count,
)
)
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in user_profile.realm.get_admin_users()]
send_event(user_profile.realm, event, admin_ids)
def notify_new_user(user_profile: UserProfile, internal: bool=False) -> None:
if settings.NOTIFICATION_BOT is not None:
send_signup_message(settings.NOTIFICATION_BOT, "signups", user_profile, internal)
statsd.gauge("users.signups.%s" % (user_profile.realm.string_id,), 1, delta=True)
# We also clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.email)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last 1000 messages on your public streams, so you have
something to look at in your home view once you finish the
tutorial."""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
stream_ids = [stream.id for stream in streams if not stream.invite_only]
recipients = get_stream_recipients(stream_ids)
recent_messages = Message.objects.filter(recipient_id__in=recipients,
pub_date__gt=one_week_ago).order_by("-id")
message_ids_to_use = list(reversed(recent_messages.values_list('id', flat=True)[0:1000]))
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id",
flat=True))
ums_to_create = [UserMessage(user_profile=user_profile, message_id=message_id,
flags=UserMessage.flags.read)
for message_id in message_ids_to_use
if message_id not in already_ids]
UserMessage.objects.bulk_create(ums_to_create)
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * Subscribes the user to the newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: List[DefaultStreamGroup]=[],
realm_creation: bool=False) -> None:
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
if prereg_user is not None:
streams = prereg_user.streams.all()
acting_user = prereg_user.referred_by # type: Optional[UserProfile]
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None \
and settings.NOTIFICATION_BOT is not None:
# This is a cross-realm private message.
internal_send_private_message(
user_profile.realm,
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
"%s <`%s`> accepted your invitation to join Zulip!" % (
user_profile.full_name,
user_profile.email,
)
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).exclude(
id=prereg_user.id).update(status=0)
if prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).update(status=0)
notify_new_user(user_profile)
if user_profile.realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'email_address': user_profile.email,
'user_id': user_profile.id,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
event = dict(type="realm_user", op="add",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_admin=user_profile.is_realm_admin,
full_name=user_profile.full_name,
avatar_url=avatar_url(user_profile),
timezone=user_profile.timezone,
date_joined=user_profile.date_joined.isoformat(),
is_guest=user_profile.is_guest,
is_bot=user_profile.is_bot)) # type: Dict[str, Any]
if not user_profile.is_bot:
event["person"]["profile_data"] = {}
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services = get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot['owner'] = user_profile.bot_owner.email
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
short_name = email_to_username(email)
user_set.add((email, full_name, short_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
short_name: str, is_realm_admin: bool=False, bot_type: Optional[int]=None,
bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream]=None,
default_events_register_stream: Optional[Stream]=None,
default_all_public_streams: Optional[bool]=None,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: List[DefaultStreamGroup]=[],
source_profile: Optional[UserProfile]=None,
realm_creation: bool=False) -> UserProfile:
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name, short_name=short_name,
is_realm_admin=is_realm_admin,
bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile)
event_time = user_profile.date_joined
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
requires_billing_update=activity_change_requires_seat_update(user_profile))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
notify_created_user(user_profile)
if bot_type:
notify_created_bot(user_profile)
else:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation)
return user_profile
def do_activate_user(user_profile: UserProfile) -> None:
user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
event_time = user_profile.date_joined
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
requires_billing_update=activity_change_requires_seat_update(user_profile))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
acting_user=acting_user,
requires_billing_update=activity_change_requires_seat_update(user_profile))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any) -> None:
"""Takes in a realm object, the name of an attribute to update, and the
value to update.
"""
property_type = Realm.property_types[name]
assert isinstance(value, property_type), (
'Cannot update %s: %s is not an instance of %s' % (
name, value, property_type,))
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type='realm',
op='update',
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_authentication_methods(realm: Realm,
authentication_methods: Dict[str, bool]) -> None:
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=realm.authentication_methods_dict())
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.allow_community_topic_editing = allow_community_topic_editing
realm.save(update_fields=['allow_message_editing',
'allow_community_topic_editing',
'message_content_edit_limit_seconds',
]
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
allow_community_topic_editing=allow_community_topic_editing),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
message_content_delete_limit_seconds: int) -> None:
realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
realm.save(update_fields=['message_content_delete_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Stream, stream_id: int) -> None:
realm.notifications_stream = stream
realm.save(update_fields=['notifications_stream'])
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Stream,
stream_id: int) -> None:
realm.signup_notifications_stream = stream
realm.save(update_fields=['signup_notifications_stream'])
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time)
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
event = dict(type="realm", op="deactivated",
realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time)
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
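# Irreversibly scrub a realm: delete its messages, avatars, attachments,
# and custom profile fields, and anonymize every user's name and email.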
def do_scrub_realm(realm: Realm) -> None:
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages(user)
do_delete_avatar_image(user)
user.full_name = "Scrubbed {}".format(generate_key()[:15])
scrubbed_email = "scrubbed-{}@{}".format(generate_key()[:15], realm.host)
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
event_type=RealmAuditLog.REALM_SCRUBBED)
def do_deactivate_user(user_profile: UserProfile,
acting_user: Optional[UserProfile]=None,
_cascade: bool=True) -> None:
if not user_profile.is_active:
return
user_profile.is_active = False
user_profile.save(update_fields=["is_active"])
delete_user_sessions(user_profile)
clear_scheduled_emails(user_profile.id)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
requires_billing_update=activity_change_requires_seat_update(user_profile))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time, increment=-1)
event = dict(type="realm_user", op="remove",
person=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True) -> None:
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id).update(active=False)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
if stream_name_in_use(new_name, stream.realm_id):
# This stream has already been deactivated, keep prepending !s until
# we have a unique stream name or we've hit a rename limit.
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
# If you don't have a unique name at this point, this will fail later in the
# code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save(update_fields=['name', 'deactivated', 'invite_only'])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
def do_change_user_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.email = new_email
user_profile.delivery_email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
payload = dict(user_id=user_profile.id,
new_email=new_email)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.email
obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
user_profile=user_profile, realm=user_profile.realm)
activation_url = create_confirmation_link(obj, user_profile.realm.host, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update({
'old_email': old_email,
'new_email': new_email,
'activate_url': activation_url
})
send_email('zerver/emails/confirm_new_email', to_email=new_email,
from_name='Zulip Account Security', from_address=FromAddress.tokenized_no_reply_address(),
context=context)
def compute_irc_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
return email.split("@")[0] + " (XMPP)"
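# Cache the looked-up (or forged) profile by email for a week, since a
# mirror may send many messages for the same user in quick succession.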
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
email_to_fullname: Callable[[str], str]) -> UserProfile:
try:
return get_user(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
short_name=email_to_username(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user(email, realm)
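# Reply with onboarding tips the first time a user writes back to the
# welcome bot (i.e. while the bot has sent them fewer than two messages).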
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
human_recipient = get_personal_recipient(message['message'].sender.id)
if Message.objects.filter(sender=welcome_bot, recipient=human_recipient).count() < 2:
internal_send_private_message(
message['realm'], welcome_bot, message['message'].sender,
"Congratulations on your first reply! :tada:\n\n"
"Feel free to continue using this space to practice your new messaging "
"skills. Or, try clicking on some of the stream names to your left!")
def render_incoming_message(message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[bugdown.MentionData]=None,
email_gateway: Optional[bool]=False) -> str:
realm_alert_words = alert_words_in_realm(realm)
try:
rendered_content = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words=realm_alert_words,
user_ids=user_ids,
mention_data=mention_data,
email_gateway=email_gateway,
)
except BugdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
def get_typing_user_profiles(recipient: Recipient, sender_id: int) -> List[UserProfile]:
if recipient.type == Recipient.STREAM:
'''
We don't support typing indicators for streams because they
are expensive and initial user feedback was they were too
distracting.
'''
raise ValueError('Typing indicators not supported for streams')
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
user_ids = list({recipient.type_id, sender_id})
assert(len(user_ids) in [1, 2])
elif recipient.type == Recipient.HUDDLE:
user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
users = [get_user_profile_by_id(user_id) for user_id in user_ids]
return users
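# Bundles the audience sets (recipients, notification targets, bot info)
# computed once per message by get_recipient_info below.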
RecipientInfoResult = TypedDict('RecipientInfoResult', {
'active_user_ids': Set[int],
'push_notify_user_ids': Set[int],
'stream_push_user_ids': Set[int],
'stream_email_user_ids': Set[int],
'um_eligible_user_ids': Set[int],
'long_term_idle_user_ids': Set[int],
'default_bot_user_ids': Set[int],
'service_bot_tuples': List[Tuple[int, int]],
})
def get_recipient_info(recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: Optional[Set[int]]=None) -> RecipientInfoResult:
stream_push_user_ids = set() # type: Set[int]
stream_email_user_ids = set() # type: Set[int]
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert(len(message_to_user_ids) in [1, 2])
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert(stream_topic is not None)
subscription_rows = stream_topic.get_active_subscriptions().values(
'user_profile_id',
'push_notifications',
'email_notifications',
'in_home_view',
).order_by('user_profile_id')
message_to_user_ids = [
row['user_profile_id']
for row in subscription_rows
]
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
stream_push_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if row['push_notifications'] and row['in_home_view']
} - user_ids_muting_topic
stream_email_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if row['email_notifications'] and row['in_home_view']
} - user_ids_muting_topic
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
if possibly_mentioned_user_ids:
# Important note: Because we haven't rendered bugdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
# escaped). `get_ids_for` will filter these extra user rows
# for our data structures not related to bots
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(
is_active=True,
).values(
'id',
'enable_online_push_notifications',
'is_bot',
'bot_type',
'long_term_idle',
)
# query_for_ids is highly optimized for large queries, and we
# need this codepath to be fast (it's part of sending messages)
query = query_for_ids(
query=query,
user_ids=sorted(list(user_ids)),
field='id'
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {
row['id']
for row in rows
if f(row)
} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
push_notify_user_ids = get_ids_for(
lambda r: r['enable_online_push_notifications']
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r)
)
long_term_idle_user_ids = get_ids_for(
lambda r: r['long_term_idle']
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via bugdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = set([
row['id']
for row in rows
if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
])
service_bot_tuples = [
(row['id'], row['bot_type'])
for row in rows
if is_service_bot(row)
]
info = dict(
active_user_ids=active_user_ids,
push_notify_user_ids=push_notify_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples
) # type: RecipientInfoResult
return info
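# Map queue names ('outgoing_webhooks', 'embedded_bots') to the Service
# bot events (mention or private-message triggers) this message generates.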
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int], active_user_ids: Set[int],
recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
event_dict = defaultdict(list) # type: Dict[str, List[Dict[str, Any]]]
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = 'outgoing_webhooks'
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = 'embedded_bots'
else:
logging.error(
'Unexpected bot_type for Service bot id=%s: %s' %
(user_profile_id, bot_type))
return
is_stream = (recipient_type == Recipient.STREAM)
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = 'mention'
# PM triggers, for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = 'private_message'
else:
return
event_dict[queue_name].append({
'trigger': trigger,
'user_profile_id': user_profile_id,
})
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
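# Bulk-create ScheduledMessage rows for "send later" and reminder
# deliveries, returning the ids of the newly scheduled messages.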
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
scheduled_messages = [] # type: List[ScheduledMessage]
for message in messages:
scheduled_message = ScheduledMessage()
scheduled_message.sender = message['message'].sender
scheduled_message.recipient = message['message'].recipient
topic_name = message['message'].topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = message['message'].content
scheduled_message.sending_client = message['message'].sending_client
scheduled_message.stream = message['stream']
scheduled_message.realm = message['realm']
scheduled_message.scheduled_timestamp = message['deliver_at']
if message['delivery_type'] == 'send_later':
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif message['delivery_type'] == 'remind':
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
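# The core send-message pipeline: render content, compute recipient info,
# bulk-save Message/UserMessage rows, then fan events out to the real-time
# push system and the various worker queues.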
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
email_gateway: Optional[bool]=False) -> List[int]:
# Filter out messages which didn't pass internal_prep_message properly
messages = [message for message in messages_maybe_none if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids = [] # type: List[int]
new_messages = [] # type: List[MutableMapping[str, Any]]
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
links_for_embed = set() # type: Set[str]
# For consistency, changes to the default values for these `get` calls
# should also be applied to the default args in do_send_message
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
message['realm'] = message.get('realm', message['message'].sender.realm)
mention_data = bugdown.MentionData(
realm_id=message['realm'].id,
content=message['message'].content,
)
message['mention_data'] = mention_data
if message['message'].is_stream_message():
stream_id = message['message'].recipient.type_id
stream_topic = StreamTopicTarget(
stream_id=stream_id,
topic_name=message['message'].topic_name()
) # type: Optional[StreamTopicTarget]
else:
stream_topic = None
info = get_recipient_info(
recipient=message['message'].recipient,
sender_id=message['message'].sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
)
message['active_user_ids'] = info['active_user_ids']
message['push_notify_user_ids'] = info['push_notify_user_ids']
message['stream_push_user_ids'] = info['stream_push_user_ids']
message['stream_email_user_ids'] = info['stream_email_user_ids']
message['um_eligible_user_ids'] = info['um_eligible_user_ids']
message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
message['default_bot_user_ids'] = info['default_bot_user_ids']
message['service_bot_tuples'] = info['service_bot_tuples']
# Render our messages.
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message['active_user_ids'],
message['realm'],
mention_data=message['mention_data'],
email_gateway=email_gateway,
)
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = bugdown_version
links_for_embed |= message['message'].links_for_preview
# Add members of the mentioned user groups into `mentions_user_ids`.
mention_data = message['mention_data']
for group_id in message['message'].mentions_user_group_ids:
members = message['mention_data'].get_group_members(group_id)
message['message'].mentions_user_ids.update(members)
'''
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
'''
mentioned_user_ids = message['message'].mentions_user_ids
default_bot_user_ids = message['default_bot_user_ids']
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
message['um_eligible_user_ids'] |= mentioned_bot_user_ids
# Update calculated fields of the message
message['message'].update_calculated_fields()
# Save the message receipts in the database
user_message_flags = defaultdict(dict) # type: Dict[int, Dict[int, List[str]]]
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
ums = [] # type: List[UserMessageLite]
for message in messages:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = message['message'].mentions_user_ids
user_messages = create_user_messages(
message=message['message'],
um_eligible_user_ids=message['um_eligible_user_ids'],
long_term_idle_user_ids=message['long_term_idle_user_ids'],
stream_push_user_ids = message['stream_push_user_ids'],
stream_email_user_ids = message['stream_email_user_ids'],
mentioned_user_ids=mentioned_user_ids,
)
for um in user_messages:
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
message['message'].service_queue_events = get_service_bot_events(
sender=message['message'].sender,
service_bot_tuples=message['service_bot_tuples'],
mentioned_user_ids=mentioned_user_ids,
active_user_ids=message['active_user_ids'],
recipient_type=message['message'].recipient.type,
)
bulk_insert_ums(ums)
# Claim attachments in message
for message in messages:
if Message.content_has_attachment(message['message'].content):
do_claim_attachments(message['message'])
for message in messages:
do_widget_post_save_actions(message)
for message in messages:
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(message['message'])
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
message_type = wide_message_dict['type']
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_user_ids=message['active_user_ids'],
user_flags=user_flags,
)
event = dict(
type='message',
message=message['message'].id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
)
'''
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
'''
user_ids = message['active_user_ids'] | set(user_flags.keys())
users = [
dict(
id=user_id,
flags=user_flags.get(user_id, []),
always_push_notify=(user_id in message['push_notify_user_ids']),
stream_push_notify=(user_id in message['stream_push_user_ids']),
stream_email_notify=(user_id in message['stream_email_user_ids']),
)
for user_id in user_ids
]
if message['message'].is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
if message['stream'] is None:
stream_id = message['message'].recipient.type_id
message['stream'] = Stream.objects.select_related("realm").get(id=stream_id)
assert message['stream'] is not None # assert needed because stubs for django are missing
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(message['realm'], event, users)
if url_embed_preview_enabled_for_realm(message['message']) and links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'message_realm_id': message['realm'].id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
if (settings.ENABLE_FEEDBACK and settings.FEEDBACK_BOT and
message['message'].recipient.type == Recipient.PERSONAL):
feedback_bot_id = get_system_bot(email=settings.FEEDBACK_BOT).id
if feedback_bot_id in message['active_user_ids']:
queue_json_publish(
'feedback_messages',
wide_message_dict,
)
if message['message'].recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (welcome_bot_id in message['active_user_ids'] and
welcome_bot_id != message['message'].sender_id):
send_welcome_bot_response(message)
for queue_name, events in message['message'].service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event['trigger'],
"user_profile_id": event["user_profile_id"],
}
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# mirror single zephyr messages at a time and don't otherwise
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
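# A minimal sketch of the send pipeline implemented above (hypothetical
# objects; production code normally goes through the check_send_* wrappers
# defined later in this file):
#
#     message = check_message(sender, client, addressee, 'hello world')
#     [message_id] = do_send_messages([message])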
class UserMessageLite:
'''
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
'''
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
um_eligible_user_ids: Set[int],
long_term_idle_user_ids: Set[int],
stream_push_user_ids: Set[int],
stream_email_user_ids: Set[int],
mentioned_user_ids: Set[int]) -> List[UserMessageLite]:
ums_to_create = []
for user_profile_id in um_eligible_user_ids:
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=0,
)
ums_to_create.append(um)
# These properties on the Message are set via
# render_markdown by code in the bugdown inline patterns
wildcard = message.mentions_wildcard
ids_with_alert_words = message.user_ids_with_alert_words
for um in ums_to_create:
if um.user_profile_id == message.sender.id and \
message.sent_by_human():
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_user_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
um.flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
user_messages = []
for um in ums_to_create:
if (um.user_profile_id in long_term_idle_user_ids and
um.user_profile_id not in stream_push_user_ids and
um.user_profile_id not in stream_email_user_ids and
message.is_stream_message() and
int(um.flags) == 0):
continue
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
'''
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
'''
if not ums:
return
vals = ','.join([
'(%d, %d, %d)' % (um.user_profile_id, um.message_id, um.flags)
for um in ums
])
query = '''
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES
''' + vals
with connection.cursor() as cursor:
cursor.execute(query)
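def _example_bulk_insert_ums() -> None:
    # A minimal sketch (never called in production) showing the intended
    # use of UserMessageLite with bulk_insert_ums; the ids here are
    # hypothetical and assume the referenced user/message rows exist.
    ums = [
        UserMessageLite(user_profile_id=1, message_id=42, flags=0),
        UserMessageLite(user_profile_id=2, message_id=42,
                        flags=int(UserMessage.flags.read)),
    ]
    bulk_insert_ums(ums)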
def do_add_submessage(realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
send_event(realm, event, target_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
reaction: Reaction, op: str) -> None:
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event = {'type': 'reaction',
'op': op,
'user': user_dict,
'message_id': message.id,
'emoji_name': reaction.emoji_name,
'emoji_code': reaction.emoji_code,
'reaction_type': reaction.reaction_type} # type: Dict[str, Any]
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
#
# However, to ensure that reactions do live-update for any user
# who has actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications.
ums = UserMessage.objects.filter(message=message.id)
send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
(emoji_code, reaction_type) = emoji_name_to_emoji_code(user_profile.realm, emoji_name)
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_name=emoji_name).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_add_reaction(user_profile: UserProfile, message: Message,
emoji_name: str, emoji_code: str, reaction_type: str) -> None:
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
emoji_code: str, reaction_type: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(realm: Realm, notification: Dict[str, Any]) -> None:
recipient_user_profiles = get_typing_user_profiles(notification['recipient'],
notification['sender'].id)
# Only deliver the notification to active user recipients
user_ids_to_notify = [profile.id for profile in recipient_user_profiles if profile.is_active]
sender_dict = {'user_id': notification['sender'].id, 'email': notification['sender'].email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
for profile in recipient_user_profiles]
event = dict(
        type='typing',
        op=notification['op'],
        sender=sender_dict,
        recipients=recipient_dicts)
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, notification_to: Sequence[str],
operator: str) -> None:
typing_notification = check_typing_notification(sender, notification_to, operator)
do_send_typing_notification(sender.realm, typing_notification)
# check_typing_notification:
# Returns a typing notification dict, ready for sending with
# do_send_typing_notification; raises JsonableError on error.
def check_typing_notification(sender: UserProfile, notification_to: Sequence[str],
operator: str) -> Dict[str, Any]:
if len(notification_to) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
try:
recipient = recipient_for_emails(notification_to, False,
sender, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
assert recipient.type != Recipient.STREAM
return {'sender': sender, 'recipient': recipient, 'op': operator}
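# Illustrative call (hypothetical address): notify recipients that the
# sender has started typing a private message.
#
#     check_send_typing_notification(sender, ['[email protected]'], 'start')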
def stream_welcome_message(stream: Stream) -> str:
content = _('Welcome to #**%s**.') % (stream.name,)
if stream.description:
content += '\n\n**' + _('Description') + '**: '
content += stream.description
return content
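# For example, a (hypothetical) stream named "design" with description
# "UI discussions" yields:
#
#     Welcome to #**design**.
#
#     **Description**: UI discussions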
def prep_stream_welcome_message(stream: Stream) -> Optional[Dict[str, Any]]:
realm = stream.realm
sender = get_system_bot(settings.WELCOME_BOT)
topic = _('hello')
content = stream_welcome_message(stream)
message = internal_prep_stream_message(
realm=realm,
sender=sender,
stream_name=stream.name,
topic=topic,
content=content)
return message
def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
event = dict(type="stream", op="create",
streams=[stream.to_dict()])
send_event(stream.realm, event, user_ids)
def get_default_value_for_history_public_to_subscribers(
realm: Realm,
invite_only: bool,
history_public_to_subscribers: Optional[bool]
) -> bool:
if invite_only:
if history_public_to_subscribers is None:
# A private stream's history is non-public by default
history_public_to_subscribers = False
else:
# If we later decide to support public streams without
# history, we can remove this code path.
history_public_to_subscribers = True
if realm.is_zephyr_mirror_realm:
# In the Zephyr mirroring model, history is unconditionally
# not public to subscribers, even for public streams.
history_public_to_subscribers = False
return history_public_to_subscribers
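# The defaults computed above, summarized for non-Zephyr realms:
#
#     invite_only=True,  history_public_to_subscribers=None  -> False
#     invite_only=True,  history_public_to_subscribers=True  -> True
#     invite_only=True,  history_public_to_subscribers=False -> False
#     invite_only=False, history_public_to_subscribers=(any) -> True
#
# In Zephyr mirror realms the result is always False.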
def create_stream_if_needed(realm: Realm,
stream_name: str,
*,
invite_only: bool=False,
is_announcement_only: bool=False,
history_public_to_subscribers: Optional[bool]=None,
stream_description: str="") -> Tuple[Stream, bool]:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
realm, invite_only, history_public_to_subscribers)
(stream, created) = Stream.objects.get_or_create(
realm=realm,
name__iexact=stream_name,
defaults = dict(
name=stream_name,
description=stream_description,
invite_only=invite_only,
is_announcement_only=is_announcement_only,
history_public_to_subscribers=history_public_to_subscribers,
is_in_zephyr_realm=realm.is_zephyr_mirror_realm
)
)
if created:
Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
if stream.is_public():
send_stream_creation_event(stream, active_non_guest_user_ids(stream.realm_id))
else:
realm_admin_ids = [user.id for user in stream.realm.get_admin_users()]
send_stream_creation_event(stream, realm_admin_ids)
return stream, created
def ensure_stream(realm: Realm,
stream_name: str,
invite_only: bool=False,
stream_description: str="") -> Stream:
return create_stream_if_needed(realm, stream_name,
invite_only=invite_only,
stream_description=stream_description)[0]
def create_streams_if_needed(realm: Realm,
stream_dicts: List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]:
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams = [] # type: List[Stream]
existing_streams = [] # type: List[Stream]
for stream_dict in stream_dicts:
stream, created = create_stream_if_needed(
realm,
stream_dict["name"],
invite_only=stream_dict.get("invite_only", False),
is_announcement_only=stream_dict.get("is_announcement_only", False),
history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
stream_description=stream_dict.get("description", "")
)
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams
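# Illustrative input (hypothetical stream names and values):
#
#     create_streams_if_needed(realm, [
#         {'name': 'design', 'description': 'UI discussions'},
#         {'name': 'core', 'invite_only': True},
#     ])
#
# returns a pair: ([newly created streams], [streams that already existed]).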
def get_recipient_from_user_ids(recipient_profile_ids: Set[int],
not_forged_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
# Avoid mutating the passed in set of recipient_profile_ids.
recipient_profile_ids = set(recipient_profile_ids)
    # If the private message is just between the sender and
    # another person, force it to be a PERSONAL (1:1) recipient internally.
if not_forged_mirror_message:
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profile_ids:
raise ValidationError(_("User not authorized for this query"))
if (len(recipient_profile_ids) == 2 and sender.id in recipient_profile_ids):
recipient_profile_ids.remove(sender.id)
if len(recipient_profile_ids) > 1:
# Make sure the sender is included in huddle messages
recipient_profile_ids.add(sender.id)
return get_huddle_recipient(recipient_profile_ids)
else:
return get_personal_recipient(list(recipient_profile_ids)[0])
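# Illustrative outcomes of the selection above (hypothetical ids, with
# sender.id == 1):
#
#     recipient_profile_ids == {1, 2} -> personal recipient for user 2
#     recipient_profile_ids == {2, 3} -> huddle recipient for {1, 2, 3}
#     recipient_profile_ids == {1}    -> personal recipient for user 1 (self-PM)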
def validate_recipient_user_profiles(user_profiles: List[UserProfile],
sender: UserProfile) -> Set[int]:
recipient_profile_ids = set()
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (not user_profile.is_active and not user_profile.is_mirror_dummy) or \
user_profile.realm.deactivated:
raise ValidationError(_("'%s' is no longer using Zulip.") % (user_profile.email,))
recipient_profile_ids.add(user_profile.id)
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return recipient_profile_ids
def recipient_for_emails(emails: Iterable[str], not_forged_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
user_profiles = user_profiles_from_unvalidated_emails(emails, sender.realm)
return recipient_for_user_profiles(
user_profiles=user_profiles,
not_forged_mirror_message=not_forged_mirror_message,
forwarder_user_profile=forwarder_user_profile,
sender=sender
)
def recipient_for_user_profiles(user_profiles: List[UserProfile], not_forged_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
recipient_profile_ids = validate_recipient_user_profiles(user_profiles, sender)
return get_recipient_from_user_ids(recipient_profile_ids, not_forged_mirror_message,
forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
pub_date__gte=message.pub_date - time_window,
pub_date__lte=message.pub_date + time_window)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_recipients(s: Union[str, Iterable[str]]) -> List[str]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = ujson.loads(s) # type: ignore # This function has a super weird union argument.
except ValueError:
data = s
if isinstance(data, str):
data = data.split(',')
if not isinstance(data, list):
raise ValueError("Invalid data type for recipients")
recipients = data
# Strip recipients, and then remove any duplicates and any that
# are the empty string after being stripped.
recipients = [recipient.strip() for recipient in recipients]
return list(set(recipient for recipient in recipients if recipient))
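# Illustrative examples of the accepted formats (hypothetical addresses):
#
#     extract_recipients('[email protected]')                 -> ['[email protected]']
#     extract_recipients('[email protected], [email protected]') -> both addresses, whitespace stripped
#     extract_recipients('["[email protected]"]')             -> ['[email protected]']  (JSON-encoded list)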
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
topic: str, body: str) -> int:
addressee = Addressee.for_stream(stream_name, topic)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
def check_send_private_message(sender: UserProfile, client: Client,
receiving_user: UserProfile, body: str) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
message_to: Sequence[str], topic_name: Optional[str],
message_content: str, realm: Optional[Realm]=None,
forged: bool=False, forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id,
widget_content)
return do_send_messages([message])[0]
def check_schedule_message(sender: UserProfile, client: Client,
message_type_name: str, message_to: Sequence[str],
topic_name: Optional[str], message_content: str,
delivery_type: str, deliver_at: datetime.datetime,
realm: Optional[Realm]=None,
forwarder_user_profile: Optional[UserProfile]=None
) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm=realm,
forwarder_user_profile=forwarder_user_profile)
message['deliver_at'] = deliver_at
message['delivery_type'] = delivery_type
return do_schedule_messages([message])[0]
def check_stream_name(stream_name: str) -> None:
if stream_name.strip() == "":
raise JsonableError(_("Invalid stream name '%s'" % (stream_name)))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name too long (limit: %s characters)." % (Stream.MAX_NAME_LENGTH)))
for i in stream_name:
if ord(i) == 0:
raise JsonableError(_("Stream name '%s' contains NULL (0x00) characters." % (stream_name)))
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '%s'" % (group_name)))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(_("Default stream group name too long (limit: %s characters)"
% (DefaultStreamGroup.MAX_NAME_LENGTH)))
for i in group_name:
if ord(i) == 0:
raise JsonableError(_("Default stream group name '%s' contains NULL (0x00) characters."
% (group_name)))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
realm: Realm,
content: str) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
sender.bot_owner, content)
sender.last_reminder = timezone_now()
sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(sender: UserProfile,
stream: Optional[Stream],
stream_name: str,
realm: Realm) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
if stream is not None:
num_subscribers = num_subscribers_for_stream_id(stream.id)
if num_subscribers > 0:
return
if stream is None:
error_msg = "that stream does not yet exist. To create it, "
else:
# num_subscribers == 0
error_msg = "there are no subscribers to that stream. To join it, "
content = ("Hi there! We thought you'd like to know that your bot **%s** just "
"tried to send a message to stream `%s`, but %s"
"click the gear in the left-side stream list." %
(sender.full_name, stream_name, error_msg))
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_sender_can_write_to_stream(sender: UserProfile,
stream: Stream,
forwarder_user_profile: Optional[UserProfile]) -> None:
# Our caller is responsible for making sure that `stream` actually
# matches the realm of the sender.
if stream.is_announcement_only:
if not (sender.is_realm_admin or is_cross_realm_bot_email(sender.email)):
raise JsonableError(_("Only organization administrators can send to this stream."))
if not (stream.invite_only or sender.is_guest):
# This is a public stream and sender is not a guest user
return
if subscribed_to_stream(sender, stream.id):
        # It is private, but you are subscribed.
return
if sender.is_api_super_user:
return
if (forwarder_user_profile is not None and forwarder_user_profile.is_api_super_user):
return
if sender.is_bot and (sender.bot_owner is not None and
subscribed_to_stream(sender.bot_owner, stream.id)):
# Bots can send to any stream their owner can.
return
if sender.email == settings.WELCOME_BOT:
# The welcome bot welcomes folks to the stream.
return
if sender.email == settings.NOTIFICATION_BOT:
return
# All other cases are an error.
raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
# check_message:
# Returns a message dict, ready for sending with do_send_messages;
# raises JsonableError on error.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> Dict[str, Any]:
stream = None
message_content = message_content_raw.rstrip()
if len(message_content) == 0:
raise JsonableError(_("Message must not be empty"))
if '\x00' in message_content:
raise JsonableError(_("Message must not contain null bytes"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if addressee.is_stream():
stream_name = addressee.stream_name()
stream_name = stream_name.strip()
check_stream_name(stream_name)
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(sender, stream, stream_name, realm)
except Stream.DoesNotExist:
send_pm_if_empty_stream(sender, None, stream_name, realm)
raise StreamDoesNotExistError(escape(stream_name))
recipient = get_stream_recipient(stream.id)
# This will raise JsonableError if there are problems.
validate_sender_can_write_to_stream(
sender=sender,
stream=stream,
forwarder_user_profile=forwarder_user_profile
)
elif addressee.is_private():
user_profiles = addressee.user_profiles()
if user_profiles is None or len(user_profiles) == 0:
raise JsonableError(_("Message must have recipients"))
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
"jabber_mirror", "JabberMirror"]
not_forged_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(user_profiles, not_forged_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.pub_date = timestamp_to_datetime(forged_timestamp)
else:
message.pub_date = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
if widget_content is not None:
try:
widget_content = ujson.loads(widget_content)
except Exception:
raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
error_msg = check_widget_content(widget_content)
if error_msg:
raise JsonableError(_('Widgets: %s') % (error_msg,))
return {'message': message, 'stream': stream, 'local_id': local_id,
'sender_queue_id': sender_queue_id, 'realm': realm,
'widget_content': widget_content}
def _internal_prep_message(realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str) -> Optional[Dict[str, Any]]:
"""
    Creates a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
    # Truncate overly long content rather than rejecting it outright.
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
if realm is None:
raise RuntimeError("None is not a valid realm for internal_prep_message!")
if addressee.is_stream():
ensure_stream(realm, addressee.stream_name())
try:
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception("Error queueing internal message by %s: %s" % (sender.email, e))
return None
def internal_prep_stream_message(realm: Realm, sender: UserProfile,
stream_name: str, topic: str,
content: str) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_message(realm: Realm, sender_email: str, recipient_type_name: str,
recipients: str, topic_name: str, content: str,
email_gateway: Optional[bool]=False) -> None:
"""internal_send_message should only be used where `sender_email` is a
system bot."""
# Verify the user is in fact a system bot
    assert is_cross_realm_bot_email(sender_email) or sender_email == settings.ERROR_BOT
sender = get_system_bot(sender_email)
parsed_recipients = extract_recipients(recipients)
addressee = Addressee.legacy_build(
sender,
recipient_type_name,
parsed_recipients,
topic_name,
realm=realm)
msg = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if msg is None:
return
do_send_messages([msg], email_gateway=email_gateway)
def internal_send_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> None:
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return
do_send_messages([message])
def internal_send_stream_message(realm: Realm, sender: UserProfile, stream_name: str,
topic: str, content: str) -> None:
message = internal_prep_stream_message(realm, sender, stream_name, topic, content)
if message is None:
return
do_send_messages([message])
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
content: str) -> None:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return
do_send_messages([message])
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
stream: Stream) -> None:
""" Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda: subscribed_to_stream(cast(UserProfile, user_profile), stream.id))
def validate_user_access_to_subscribers_helper(user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[], bool]) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
    # Guest users can access the subscribers of public streams they are subscribed to.
if user_profile.is_guest:
if check_user_subscribed():
return
        # We could put an AssertionError here, since we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if (stream_dict["invite_only"] and not check_user_subscribed()):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
sub_dict: Mapping[int, bool],
stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
try:
validate_user_access_to_subscribers_helper(user_profile, stream_dict,
lambda: sub_dict[stream_dict["id"]])
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
stream_ids = [stream['id'] for stream in target_stream_dicts]
stream_recipient.populate_for_stream_ids(stream_ids)
recipient_ids = sorted([
stream_recipient.recipient_id_for(stream_id)
for stream_id in stream_ids
])
result = dict((stream["id"], []) for stream in stream_dicts) # type: Dict[int, List[int]]
if not recipient_ids:
return result
'''
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
'''
id_list = ', '.join(str(recipient_id) for recipient_id in recipient_ids)
query = '''
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
INNER JOIN zerver_userprofile ON
zerver_userprofile.id = zerver_subscription.user_profile_id
WHERE
zerver_subscription.recipient_id in (%s) AND
zerver_subscription.active AND
zerver_userprofile.is_active
ORDER BY
zerver_subscription.recipient_id
''' % (id_list,)
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
'''
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
'''
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile__is_active=True
)
return subscriptions
def get_subscriber_emails(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[str]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def notify_subscriptions_added(user_profile: UserProfile,
sub_pairs: Iterable[Tuple[Subscription, Stream]],
stream_user_ids: Callable[[Stream], List[int]],
recent_traffic: Dict[int, int],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'realm': user_profile.realm.string_id})
# Send a notification to the user who subscribed.
payload = [dict(name=stream.name,
stream_id=stream.id,
in_home_view=subscription.in_home_view,
invite_only=stream.invite_only,
is_announcement_only=stream.is_announcement_only,
color=subscription.color,
email_address=encode_email_address(stream),
desktop_notifications=subscription.desktop_notifications,
audible_notifications=subscription.audible_notifications,
push_notifications=subscription.push_notifications,
email_notifications=subscription.email_notifications,
description=stream.description,
pin_to_top=subscription.pin_to_top,
is_old_stream=is_old_stream(stream.date_created),
stream_weekly_traffic=get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic),
subscribers=stream_user_ids(stream),
history_public_to_subscribers=stream.history_public_to_subscribers)
for (subscription, stream) in sub_pairs]
event = dict(type="subscription", op="add",
subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
altered_user_ids: Iterable[int],
subscribed_user_ids: Iterable[int]) -> Set[int]:
'''
    altered_user_ids contains the user_ids we are adding/removing;
    subscribed_user_ids contains the already-subscribed user_ids.
    Based on stream policy, we notify the correct bystanders, while
    not notifying altered_users (who get subscriber data via another event).
'''
if stream.invite_only:
# PRIVATE STREAMS
# Realm admins can access all private stream subscribers. Send them an
# event even if they aren't subscribed to stream.
realm_admin_ids = [user.id for user in stream.realm.get_admin_users()]
user_ids_to_notify = []
user_ids_to_notify.extend(realm_admin_ids)
user_ids_to_notify.extend(subscribed_user_ids)
return set(user_ids_to_notify) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
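# Illustrative outcome (hypothetical ids): for a public stream in a realm
# whose active non-guest users are {5, 6, 7}, subscribing user 7 notifies
# bystanders {5, 6}; user 7 itself learns of the change via its own
# "subscription add" event.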
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
stream_ids = [stream.id for stream in streams]
all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
user_profile__is_active=True,
).values(
'recipient__type_id',
'user_profile_id',
).order_by(
'recipient__type_id',
)
get_stream_id = itemgetter('recipient__type_id')
all_subscribers_by_stream = defaultdict(list) # type: Dict[int, List[int]]
for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
user_ids = [row['user_profile_id'] for row in rows]
all_subscribers_by_stream[stream_id] = user_ids
return all_subscribers_by_stream
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max('id'))['id__max']
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
users: Iterable[UserProfile],
from_stream_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams]) # type: Mapping[int, Recipient]
recipients = [recipient.id for recipient in recipients_map.values()] # type: List[int]
stream_map = {} # type: Dict[int, Stream]
for stream in streams:
stream_map[recipients_map[stream.id].id] = stream
subs_by_user = defaultdict(list) # type: Dict[int, List[Subscription]]
all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
already_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
subs_to_activate = [] # type: List[Tuple[Subscription, Stream]]
new_subs = [] # type: List[Tuple[UserProfile, int, Stream]]
for user_profile in users:
needs_new_sub = set(recipients) # type: Set[int]
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
# Mark the sub as active, without saving, so that
# pick_color will consider this to be an active
# subscription when picking colors
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
subs_to_add = [] # type: List[Tuple[Subscription, Stream]]
for (user_profile, recipient_id, stream) in new_subs:
color = pick_color(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id,
desktop_notifications=user_profile.enable_stream_desktop_notifications,
audible_notifications=user_profile.enable_stream_sounds,
push_notifications=user_profile.enable_stream_push_notifications,
email_notifications=user_profile.enable_stream_email_notifications,
)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(user_profile.realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
sub_ids = [sub.id for (sub, stream) in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
occupied_streams_after = list(get_occupied_streams(user_profile.realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs = []  # type: List[RealmAuditLog]
for (sub, stream) in subs_to_add:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time))
for (sub, stream) in subs_to_activate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time))
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams and not from_stream_creation:
event = dict(type="stream", op="occupy",
streams=[stream.to_dict()
for stream in new_occupied_streams])
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
    # Notify all existing users on streams that users have joined.
    # First, get all users subscribed to the streams that we care about.
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries.
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
if stream.is_in_zephyr_realm and not stream.invite_only:
return []
user_ids = all_subscribers_by_stream[stream.id]
return user_ids
sub_tuples_by_user = defaultdict(list) # type: Dict[int, List[Tuple[Subscription, Stream]]]
new_streams = set() # type: Set[Tuple[int, int]]
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
# We now send several types of events to notify browsers. The
# first batch is notifications to users on invite-only streams
# that the stream exists.
for stream in streams:
if not stream.is_public():
            # Users newly added to invite-only streams need a `create`
            # notification, because they need the stream to exist before
            # they get the "subscribe" notification (and so that they can
            # manage the new stream).
            # Realm admins already have all created private streams.
realm_admin_ids = [user.id for user in user_profile.realm.get_admin_users()]
new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
user.id not in realm_admin_ids]
send_stream_creation_event(stream, new_users_ids)
stream_ids = {stream.id for stream in streams}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
# The second batch is events for the users themselves that they
# were subscribed to the new streams.
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
recent_traffic)
    # The third batch is events for other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=new_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for new_user_id in new_user_ids:
event = dict(type="subscription", op="peer_add",
subscriptions=[stream.name],
user_id=new_user_id)
send_event(stream.realm, event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
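# Illustrative call (hypothetical objects): subscribe one user to one
# stream and split the result into new and pre-existing subscriptions.
#
#     (subscribed, already_subscribed) = bulk_add_subscriptions([stream], [user])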
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'realm': user_profile.realm.string_id})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_client: Client,
acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {
stream.id
for (sub, stream) in user_sub_stream_info
}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_tups()
subs_to_deactivate = [] # type: List[Tuple[Subscription, Stream]]
sub_ids_to_deactivate = [] # type: List[int]
# This loop just flattens out our data into big lists for
# bulk operations.
for tup_list in existing_subs_by_user.values():
for (sub, stream) in tup_list:
subs_to_deactivate.append((sub, stream))
sub_ids_to_deactivate.append(sub.id)
our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
        Subscription.objects.filter(
            id__in=sub_ids_to_deactivate,
        ).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs = []  # type: List[RealmAuditLog]
for (sub, stream) in subs_to_deactivate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time))
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict = defaultdict(list) # type: Dict[int, List[UserProfile]]
streams_by_user = defaultdict(list) # type: Dict[int, List[Stream]]
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {'type': 'mark_stream_messages_as_read',
'client_id': acting_client.id,
'user_profile_id': user_profile.id,
'stream_ids': [stream.id for stream in streams]}
queue_json_publish("deferred_work", event)
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def send_peer_remove_event(stream: Stream) -> None:
if stream.is_in_zephyr_realm and not stream.invite_only:
return
altered_users = altered_user_dict[stream.id]
altered_user_ids = [u.id for u in altered_users]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=altered_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
subscriptions=[stream.name],
user_id=removed_user.id)
send_event(our_realm, event, peer_user_ids)
for stream in streams:
send_peer_remove_event(stream=stream)
new_vacant_streams = [stream for stream in
set(occupied_streams_before) - set(occupied_streams_after)]
new_vacant_private_streams = [stream for stream in new_vacant_streams
if stream.invite_only]
new_vacant_public_streams = [stream for stream in new_vacant_streams
if not stream.invite_only]
if new_vacant_public_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_public_streams])
send_event(our_realm, event, active_user_ids(our_realm.id))
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream)
return (
[(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed,
)
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
value: Any) -> None:
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
stream: Stream, property_name: str, value: Any
) -> None:
setattr(sub, property_name, value)
sub.save(update_fields=[property_name])
log_subscription_property_change(user_profile.email, stream.name,
property_name, value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=property_name,
value=value,
stream_id=stream.id,
name=stream.name)
send_event(user_profile.realm, event, [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time)
def do_change_full_name(user_profile: UserProfile, full_name: str,
acting_user: Optional[UserProfile]) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time, extra_data=old_name)
payload = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot', op='update', bot=payload),
bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
acting_user: UserProfile) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time)
update_users = bot_owner_user_ids(user_profile)
    # For admins, an update event is sent instead of delete/add
    # events, since an admin's bot_data already contains all the
    # bots and none of them should be removed or added again.
    # Delete the bot from the previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
send_event(user_profile.realm,
dict(type='realm_bot',
op="delete",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
)),
{previous_owner.id, })
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id, }
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id, })
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id, }
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
)),
update_users)
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> None:
user_profile.api_key = generate_api_key()
user_profile.save(update_fields=["api_key"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time)
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
api_key=user_profile.api_key,
)),
bot_owner_user_ids(user_profile))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={'avatar_source': avatar_source},
event_time=event_time)
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_user_ids(user_profile))
payload = dict(
email=user_profile.email,
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
user_id=user_profile.id
)
send_event(user_profile.realm,
dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm_id))
def do_delete_avatar_image(user: UserProfile) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR)
delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
if log:
log_event({'type': 'realm_change_icon',
'realm': realm.string_id,
'icon_source': icon_source})
send_event(realm,
dict(type='realm',
op='update_dict',
property="icon",
data=dict(icon_source=realm.icon_source,
icon_url=realm_icon_url(realm))),
active_user_ids(realm.id))
def do_change_plan_type(user: UserProfile, plan_type: int) -> None:
realm = user.realm
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=['plan_type'])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm, acting_user=user, event_time=timezone_now(),
extra_data={'old_value': old_value, 'new_value': plan_type})
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.save(update_fields=['_max_invites', 'message_visibility_limit'])
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name # type: Optional[str]
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name # type: Optional[str]
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
log: bool=True) -> None:
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_user_ids(user_profile))
def do_change_is_admin(user_profile: UserProfile, value: bool,
permission: str='administer') -> None:
if permission == "administer":
user_profile.is_realm_admin = value
user_profile.save(update_fields=["is_realm_admin"])
elif permission == "api_super_user":
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
else:
raise AssertionError("Invalid admin permission")
if permission == 'administer':
event = dict(type="realm_user", op="update",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_admin=value))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_is_guest(user_profile: UserProfile, value: bool) -> None:
user_profile.is_guest = value
user_profile.save(update_fields=["is_guest"])
event = dict(type="realm_user", op="update",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_guest=value))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
history_public_to_subscribers: Optional[bool]=None) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
stream.is_web_public = is_web_public
stream.save(update_fields=['is_web_public'])
def do_change_stream_announcement_only(stream: Stream, is_announcement_only: bool) -> None:
stream.is_announcement_only = is_announcement_only
stream.save(update_fields=['is_announcement_only'])
def do_rename_stream(stream: Stream, new_name: str, log: bool=True) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'realm': stream.realm.string_id,
'new_name': new_name})
recipient = get_stream_recipient(stream.id)
messages = Message.objects.filter(recipient=recipient).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient.id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.save(update_fields=['description'])
event = dict(
type='stream',
op='update',
property='description',
name=stream.name,
stream_id=stream.id,
value=new_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
emails_restricted_to_domains: Optional[bool]=None) -> Realm:
existing_realm = get_realm(string_id)
if existing_realm is not None:
raise AssertionError("Realm %s already exists!" % (string_id,))
kwargs = {} # type: Dict[str, Any]
if emails_restricted_to_domains is not None:
kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
if settings.BILLING_ENABLED:
kwargs['plan_type'] = Realm.LIMITED
kwargs['message_visibility_limit'] = Realm.MESSAGE_VISIBILITY_LIMITED
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
# Create stream once Realm object has been saved
notifications_stream = ensure_stream(realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
realm.notifications_stream = notifications_stream
signup_notifications_stream = ensure_stream(
realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
stream_description="A private stream for core team members.")
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
# Log the event
log_event({"type": "realm_created",
"string_id": string_id,
"emails_restricted_to_domains": emails_restricted_to_domains})
# Send a notification to the admin realm (if configured)
if settings.NOTIFICATION_BOT is not None:
signup_message = "Signups enabled"
admin_realm = get_system_bot(settings.NOTIFICATION_BOT).realm
internal_send_message(admin_realm, settings.NOTIFICATION_BOT, "stream",
"signups", realm.display_subdomain, signup_message)
return realm
def do_change_notification_settings(user_profile: UserProfile, name: str, value: bool,
log: bool=True) -> None:
"""Takes in a UserProfile object, the name of a global notification
preference to update, and the value to update to
"""
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(value, notification_setting_type), (
'Cannot update %s: %s is not an instance of %s' % (
name, value, notification_setting_type,))
setattr(user_profile, name, value)
# Disabling digest emails should clear a user's email queue
if name == 'enable_digest_emails' and not value:
clear_scheduled_emails(user_profile.id, ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': name,
'setting': value}
if log:
log_event(event)
send_event(user_profile.realm, event, [user_profile.id])
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str]) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': setting_name,
'setting': setting_value}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event['language_name'] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(email=user_profile.email,
user_id=user_profile.id,
timezone=user_profile.timezone)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
realm: Realm) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(
name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_('Invalid default stream group %s' % (group_name,)))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def set_default_streams(realm: Realm, stream_dict: Dict[str, Dict[str, Any]]) -> None:
DefaultStream.objects.filter(realm=realm).delete()
stream_names = []
for name, options in stream_dict.items():
stream_names.append(name)
stream = ensure_stream(realm,
name,
invite_only = options.get("invite_only", False),
stream_description = options.get("description", ''))
DefaultStream.objects.create(stream=stream, realm=realm)
    # Always include the realm's default notifications stream, if it exists
if realm.notifications_stream is not None:
DefaultStream.objects.get_or_create(stream=realm.notifications_stream, realm=realm)
log_event({'type': 'default_streams',
'realm': realm.string_id,
'streams': stream_names})
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id))
)
send_event(realm, event, active_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm))
)
send_event(realm, event, active_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
description: str, streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group_name})
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description)
if not created:
raise JsonableError(_("Default stream group '%(group_name)s' already exists")
% {'group_name': group_name})
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group.name})
if stream in group.streams.all():
raise JsonableError(_(
"Stream '%(stream_name)s' is already present in default stream group '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group.name})
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(_(
"Stream '%(stream_name)s' is not present in default stream group '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group.name})
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
new_group_name: str) -> None:
if group.name == new_group_name:
raise JsonableError(_("This default stream group is already named '%s'") % (new_group_name,))
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '%s' already exists") % (new_group_name,))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
new_description: str) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [default.stream for default in
DefaultStream.objects.select_related("stream", "stream__realm").filter(
realm_id=realm_id)]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile: UserProfile,
log_time: datetime.datetime) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
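        # Illustration (hypothetical times): if the last interval is
        # [10:00, 10:15] and a new event arrives with log_time 10:10,
        # case (1) applies, so we extend the old row to cover
        # [10:00, max(10:15, 10:10 + MIN_INTERVAL_LENGTH)].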
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
@statsd_increment('user_activity')
def do_update_user_activity(user_profile: UserProfile,
client: Client,
query: str,
log_time: datetime.datetime) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile = user_profile,
client = client,
query = query,
defaults={'last_visit': log_time, 'count': 0})
activity.count += 1
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
presence_dict = presence.to_dict()
event = dict(type="presence", email=user_profile.email,
server_timestamp=time.time(),
presence={presence_dict['client']: presence_dict})
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website', while the desktop
    # app reports a client as ZulipDesktop, because it sets a custom
    # user agent. We want both to count as web users, so we alias
    # ZulipDesktop to website.
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int) -> None:
client = consolidate_client(client)
(presence, created) = UserPresence.objects.get_or_create(
user_profile = user_profile,
client = client,
defaults = {'timestamp': log_time,
'status': status})
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
    if not created and (stale_status or was_idle or status == presence.status):
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
# user without delay. Note that we won't send an update here for a
# timestamp update, because we rely on the browser to ping us every 50
# seconds for realm-wide status updates, and those updates should have
# recent timestamps, which means the browser won't think active users
# have gone idle. If we were more aggressive in this function about
# sending timestamp updates, we could eliminate the ping responses, but
# that's not a high priority for now, considering that most of our non-MIT
# realms are pretty small.
send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
status: int, new_user_input: bool) -> None:
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile: UserProfile, client: Client,
pointer: int, update_flags: bool=False) -> None:
prev_pointer = user_profile.pointer
user_profile.pointer = pointer
user_profile.save(update_fields=["pointer"])
if update_flags: # nocoverage
# This block of code is compatibility code for the
        # legacy/original native Zulip Android app. It's a shim
# that will mark as read any messages up until the pointer
# move; we expect to remove this feature entirely before long,
# when we drop support for the old Android app entirely.
app_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
message__id__gt=prev_pointer,
message__id__lte=pointer).extra(where=[
UserMessage.where_unread(),
UserMessage.where_active_push_notification(),
]).values_list("message_id", flat=True)
UserMessage.objects.filter(user_profile=user_profile,
message__id__gt=prev_pointer,
message__id__lte=pointer).extra(where=[UserMessage.where_unread()]) \
.update(flags=F('flags').bitor(UserMessage.flags.read))
do_clear_mobile_push_notifications_for_ids(user_profile, app_message_ids)
event = dict(type='pointer', pointer=pointer)
send_event(user_profile.realm, event, [user_profile.id])
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event('bankruptcy')
msgs = UserMessage.objects.filter(
user_profile=user_profile
).extra(
where=[UserMessage.where_unread()]
)
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read)
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=[], # we don't send messages, since the client reloads anyway
all=True
)
send_event(user_profile.realm, event, [user_profile.id])
statsd.incr("mark_all_as_read", count)
all_push_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list("message_id", flat=True)[0:10000]
do_clear_mobile_push_notifications_for_ids(user_profile, all_push_message_ids)
return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
client: Client,
stream: Stream,
topic_name: Optional[str]=None) -> int:
log_statsd_event('mark_stream_as_read')
msgs = UserMessage.objects.filter(
user_profile=user_profile
)
recipient = get_stream_recipient(stream.id)
msgs = msgs.filter(message__recipient=recipient)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()]
)
message_ids = list(msgs.values_list('message__id', flat=True))
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read)
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=message_ids,
all=False,
)
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids(user_profile, message_ids)
statsd.incr("mark_stream_as_read", count)
return count
def do_clear_mobile_push_notifications_for_ids(user_profile: UserProfile,
message_ids: List[int]) -> None:
for user_message in UserMessage.objects.filter(
message_id__in=message_ids,
user_profile=user_profile).extra(
where=[UserMessage.where_active_push_notification()]):
event = {
"user_profile_id": user_profile.id,
"message_id": user_message.message_id,
"type": "remove",
}
queue_json_publish("missedmessage_mobile_notifications", event)
def do_update_message_flags(user_profile: UserProfile,
client: Client,
operation: str,
flag: str,
messages: List[int]) -> int:
valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '%s'" % (flag,)))
flagattr = getattr(UserMessage.flags, flag)
assert messages is not None
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
# Hack to let you star any message
if msgs.count() == 0:
        if len(messages) != 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read)
if operation == 'add':
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
count = msgs.update(flags=F('flags').bitand(~flagattr))
else:
raise AssertionError("Invalid message flags operation")
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': False}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
do_clear_mobile_push_notifications_for_ids(user_profile, messages)
statsd.incr("flags.%s.%s" % (flag, operation), count)
return count
def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
return Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__type_id=stream_id).exists()
def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
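    # For example, truncate_content("hello world", 8, "...") returns
    # "hello...": we keep the first max_length - len(truncation_message)
    # characters and append the truncation message.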
if len(content) > max_length:
content = content[:max_length - len(truncation_message)] + truncation_message
return content
def truncate_body(body: str) -> str:
return truncate_content(body, MAX_MESSAGE_LENGTH, "...")
def truncate_topic(topic: str) -> str:
return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, "...")
MessageUpdateUserInfoResult = TypedDict('MessageUpdateUserInfoResult', {
'message_user_ids': Set[int],
'mention_user_ids': Set[int],
})
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical
).values('user_profile_id', 'flags')
rows = list(query)
message_user_ids = {
row['user_profile_id']
for row in rows
}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
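    # A row counts as mentioned if either bit is set; e.g.
    # (hypothetically) flags == read | mentioned has a nonzero
    # intersection with the mask, while flags == read alone does not.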
mention_user_ids = {
row['user_profile_id']
for row in rows
if int(row['flags']) & mask
}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums = set() # type: Set[UserMessage]
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
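    # Example of the bit arithmetic above (hypothetical state): if
    # um.flags currently has only `read` set and flag is `mentioned`,
    # then should_set=True ORs the `mentioned` bit in, while
    # should_set=False on a set bit ANDs with its complement to clear it.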
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message]) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
for changed_message in changed_messages:
message_ids.append(changed_message.id)
key = to_dict_cache_key_id(changed_message.id)
value = MessageDict.to_dict_uncached(changed_message)
items_for_remote_cache[key] = (value,)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
message: Message,
content: Optional[str],
rendered_content: Optional[str]) -> None:
event = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id} # type: Dict[str, Any]
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message, topic_name: Optional[str],
propagate_mode: str, content: Optional[str],
rendered_content: Optional[str], prior_mention_user_ids: Set[int],
mention_user_ids: Set[int]) -> int:
event = {'type': 'update_message',
# TODO: We probably want to remove the 'sender' field
# after confirming it isn't used by any consumers.
'sender': user_profile.email,
'user_id': user_profile.id,
'message_id': message.id} # type: Dict[str, Any]
edit_history_event = {
'user_id': user_profile.id,
} # type: Dict[str, Any]
changed_messages = [message]
if message.is_stream_message():
stream_id = message.recipient.type_id
event['stream_name'] = Stream.objects.get(id=stream_id).name
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
event['is_me_message'] = Message.is_status_message(content, rendered_content)
prev_content = edit_history_event['prev_content']
if Message.content_has_attachment(prev_content) or Message.content_has_attachment(message.content):
check_attachment_reference_change(prev_content, message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
) # type: Optional[StreamTopicTarget]
else:
stream_topic = None
    # TODO: We may want a slightly leaner version of this function for updates.
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if topic_name is not None:
orig_topic_name = message.topic_name()
topic_name = truncate_topic(topic_name)
event["propagate_mode"] = propagate_mode
message.set_topic_name(topic_name)
event["stream_id"] = message.recipient.type_id
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = bugdown.topic_links(message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
if propagate_mode in ["change_later", "change_all"]:
messages_list = update_messages_for_topic_edit(
message=message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
)
changed_messages += messages_list
message.last_edit_time = timezone_now()
assert message.last_edit_time is not None # assert needed because stubs for django are missing
event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
edit_history_event['timestamp'] = event['edit_timestamp']
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=message)
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
return len(changed_messages)
def do_delete_message(user_profile: UserProfile, message: Message) -> None:
message_type = "stream"
if not message.is_stream_message():
message_type = "private"
event = {
'type': 'delete_message',
'sender': user_profile.email,
'message_id': message.id,
'message_type': message_type, } # type: Dict[str, Any]
if message_type == "stream":
event['stream_id'] = message.recipient.type_id
event['topic'] = message.topic_name()
else:
event['recipient_user_ids'] = message.recipient.type_id
ums = [{'id': um.user_profile_id} for um in
UserMessage.objects.filter(message=message.id)]
move_messages_to_archive([message.id])
send_event(user_profile.realm, event, ums)
def do_delete_messages(user: UserProfile) -> None:
message_ids = Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')
if message_ids:
move_messages_to_archive(message_ids)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS['messages_in_stream:is_bot:day']
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property,
end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values('stream_id').annotate(value=Sum('value'))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
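    # round() with a negative second argument rounds to the left of the
    # decimal point: e.g. 1234 -> round(1234, -2) -> 1200, while a
    # two-digit number like 86 is returned unchanged.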
return int(round(number, 2 - len(str(number))))
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
recent_traffic: Dict[int, int]) -> Optional[int]:
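    # Sketch of the math below: a stream created 14 days ago with 30
    # recent messages averages 30 * 7 // 14 == 15 messages/week; streams
    # at least 28 days old just divide their 28-day traffic by 4; streams
    # younger than STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS return None.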
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
def is_old_stream(stream_date_created: datetime.datetime) -> bool:
return (timezone_now() - stream_date_created).days \
>= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS
def encode_email_address(stream: Stream) -> str:
return encode_email_address_helper(stream.name, stream.email_token)
def encode_email_address_helper(name: str, email_token: str) -> str:
# Some deployments may not use the email gateway
if settings.EMAIL_GATEWAY_PATTERN == '':
return ''
# Given the fact that we have almost no restrictions on stream names and
# that what characters are allowed in e-mail addresses is complicated and
# dependent on context in the address, we opt for a very simple scheme:
#
# Only encode the stream name (leave the + and token alone). Encode
# everything that isn't alphanumeric plus _ as the percent-prefixed integer
# ordinal of that character, padded with zeroes to the maximum number of
# bytes of a UTF-8 encoded Unicode character.
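    # For example (hypothetical stream name), "core & dev" encodes to
    # "core%0032%0038%0032dev", since ord(' ') == 32 and ord('&') == 38,
    # each zero-padded to four digits.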
encoded_name = re.sub(r"\W", lambda x: "%" + str(ord(x.group(0))).zfill(4), name)
encoded_token = "%s+%s" % (encoded_name, email_token)
return settings.EMAIL_GATEWAY_PATTERN % (encoded_token,)
def get_email_gateway_message_string_from_address(address: str) -> Optional[str]:
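    # Sketch of the matching below, assuming a hypothetical
    # EMAIL_GATEWAY_PATTERN of "%s@streams.example.com": the pattern is
    # split around "%s", each literal piece is regex-escaped, and the
    # address "foo+token@streams.example.com" matches with
    # group(1) == "foo+token".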
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
if settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
# Accept mails delivered to any Zulip server
pattern_parts[-1] = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK
match_email_re = re.compile("(.*?)".join(pattern_parts))
match = match_email_re.match(address)
if not match:
return None
msg_string = match.group(1)
return msg_string
def decode_email_address(email: str) -> Optional[Tuple[str, str]]:
    # Perform the reverse of encode_email_address. Returns a tuple of (stream_name, email_token)
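    # For example (hypothetical values), "core%0032%0038%0032dev+abc123"
    # decodes to ("core & dev", "abc123"): the piece after the '+' (or
    # '.') is the token, and each "%NNNN" escape becomes chr(NNNN).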
msg_string = get_email_gateway_message_string_from_address(email)
if msg_string is None:
return None
elif '.' in msg_string:
# Workaround for Google Groups and other programs that don't accept emails
# that have + signs in them (see Trac #2102)
encoded_stream_name, token = msg_string.split('.')
else:
encoded_stream_name, token = msg_string.split('+')
stream_name = re.sub(r"%\d{4}", lambda x: chr(int(x.group(0)[1:])), encoded_stream_name)
return stream_name, token
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = [
{'name': stream.name,
'in_home_view': True,
'invite_only': False,
'is_announcement_only': stream.is_announcement_only,
'color': get_next_color(),
'desktop_notifications': True,
'audible_notifications': True,
'push_notifications': False,
'pin_to_top': False,
'stream_id': stream.id,
'description': stream.description,
'is_old_stream': is_old_stream(stream.date_created),
'stream_weekly_traffic': get_average_weekly_stream_traffic(stream.id,
stream.date_created,
{}),
'email_address': ''}
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False)]
return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact on page load for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
include_subscribers: bool=True) -> SubHelperT:
sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
"recipient_id", "in_home_view", "color", "desktop_notifications",
"audible_notifications", "push_notifications", "email_notifications",
"active", "pin_to_top"
).order_by("recipient_id")
sub_dicts = list(sub_dicts)
sub_recipient_ids = [
sub['recipient_id']
for sub in sub_dicts
]
stream_recipient = StreamRecipientMap()
stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
stream_ids = set() # type: Set[int]
for sub in sub_dicts:
sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
stream_ids.add(sub['stream_id'])
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values("id", "name", "invite_only", "is_announcement_only", "realm_id",
"email_token", "description", "date_created",
"history_public_to_subscribers")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = dict((sub["stream_id"], sub["active"]) for sub in sub_dicts)
# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
if include_subscribers:
subscriber_map = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
streams_subscribed_map,
stream_recipient
) # type: Mapping[int, Optional[List[int]]]
else:
# If we're not including subscribers, always return None,
# which the below code needs to check for anyway.
subscriber_map = defaultdict(lambda: None)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["stream_id"])
stream = stream_hash.get(sub["stream_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
subscribers = subscriber_map[stream["id"]] # type: Optional[List[int]]
# Important: don't show the subscribers if the stream is invite only
# and this user isn't on it anymore (or a realm administrator).
if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
subscribers = None
# Guest users lose access to subscribers when they are unsubscribed.
if not sub["active"] and user_profile.is_guest:
subscribers = None
stream_dict = {'name': stream["name"],
'in_home_view': sub["in_home_view"],
'invite_only': stream["invite_only"],
'is_announcement_only': stream["is_announcement_only"],
'color': sub["color"],
'desktop_notifications': sub["desktop_notifications"],
'audible_notifications': sub["audible_notifications"],
'push_notifications': sub["push_notifications"],
'email_notifications': sub["email_notifications"],
'pin_to_top': sub["pin_to_top"],
'stream_id': stream["id"],
'description': stream["description"],
'is_old_stream': is_old_stream(stream["date_created"]),
'stream_weekly_traffic': get_average_weekly_stream_traffic(stream["id"],
stream["date_created"],
recent_traffic),
'email_address': encode_email_address_helper(stream["name"], stream["email_token"]),
'history_public_to_subscribers': stream['history_public_to_subscribers']}
if subscribers is not None:
stream_dict['subscribers'] = subscribers
if sub["active"]:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
else:
never_subscribed_stream_ids = set()
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
is_public = (not stream['invite_only'])
if is_public or user_profile.is_realm_admin:
stream_dict = {'name': stream['name'],
'invite_only': stream['invite_only'],
'is_announcement_only': stream['is_announcement_only'],
'stream_id': stream['id'],
'is_old_stream': is_old_stream(stream["date_created"]),
'stream_weekly_traffic': get_average_weekly_stream_traffic(stream["id"],
stream["date_created"],
recent_traffic),
'description': stream['description'],
'history_public_to_subscribers': stream['history_public_to_subscribers']}
if is_public or user_profile.is_realm_admin:
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(user_profile: UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
user_ids = set()
for subs in [subscribed, unsubscribed, never_subscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = sorted([email_dict[user_id] for user_id in sub['subscribers']])
return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
sender_id: int,
message_type: str,
active_user_ids: Set[int],
user_flags: Dict[int, List[str]]) -> List[int]:
'''
Given a list of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications (either due
to mentions or being PM'ed).
* They are no longer "present" according to the
UserPresence table.
'''
if realm.presence_disabled:
return []
is_pm = message_type == 'private'
user_ids = set()
for user_id in active_user_ids:
flags = user_flags.get(user_id, []) # type: Iterable[str]
mentioned = 'mentioned' in flags
private_message = is_pm and user_id != sender_id
if mentioned or private_message:
user_ids.add(user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
if not user_ids:
return []
# 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
recent = timezone_now() - datetime.timedelta(seconds=140)
rows = UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent
).distinct('user_profile_id').values('user_profile_id')
active_user_ids = {row['user_profile_id'] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(list(idle_user_ids))
def get_status_dict(requesting_user_profile: UserProfile) -> Dict[str, Dict[str, Dict[str, Any]]]:
if requesting_user_profile.realm.presence_disabled:
# Return an empty dict if presence is disabled in this realm
return defaultdict(dict)
return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)
def get_cross_realm_dicts() -> List[Dict[str, Any]]:
users = bulk_get_users(list(settings.CROSS_REALM_BOT_EMAILS), None,
base_query=UserProfile.objects.filter(
realm__string_id=settings.SYSTEM_BOT_REALM)).values()
return [{'email': user.email,
'user_id': user.id,
'is_admin': user.is_realm_admin,
'is_bot': user.is_bot,
'avatar_url': avatar_url(user),
'timezone': user.timezone,
'date_joined': user.date_joined.isoformat(),
'full_name': user.full_name}
for user in users
            # Important: We filter here, in addition to in
# `base_query`, because of how bulk_get_users shares its
# cache with other UserProfile caches.
if user.realm.string_id == settings.SYSTEM_BOT_REALM]
def do_send_confirmation_email(invitee: PreregistrationUser,
referrer: UserProfile) -> None:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.email,
'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
from_name = "%s (via Zulip)" % (referrer.full_name,)
send_email('zerver/emails/invitation', to_email=invitee.email, from_name=from_name,
from_address=FromAddress.tokenized_no_reply_address(), context=context)
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
raise ValidationError('%s is an email address reserved for system bots' % (email,))
def validate_email_for_realm(target_realm: Realm, email: str) -> None:
email_not_system_bot(email)
try:
existing_user_profile = get_user(email, target_realm)
except UserProfile.DoesNotExist:
return
if existing_user_profile.is_active:
if existing_user_profile.is_mirror_dummy:
raise AssertionError("Mirror dummy user is already active!")
# Other users should not already exist at all.
raise ValidationError('%s already has an account' % (email,))
elif not existing_user_profile.is_mirror_dummy:
raise ValidationError('The account for %s has been deactivated' % (email,))
def validate_email(user_profile: UserProfile, email: str) -> Tuple[Optional[str], Optional[str]]:
try:
validators.validate_email(email)
except ValidationError:
return _("Invalid address."), None
try:
email_allowed_for_realm(email, user_profile.realm)
except DomainNotAllowedForRealmError:
return _("Outside your domain."), None
except DisposableEmailError:
return _("Please use your real email address."), None
except EmailContainsPlusError:
return _("Email addresses containing + are not allowed."), None
try:
validate_email_for_realm(user_profile.realm, email)
except ValidationError:
return None, _("Already has an account.")
return None, None
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = ['errors', 'sent_invitations']
def __init__(self, msg: str, errors: List[Tuple[str, str]], sent_invitations: bool) -> None:
self._msg = msg # type: str
self.errors = errors # type: List[Tuple[str, str]]
self.sent_invitations = sent_invitations # type: bool
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
'''An upper bound on the number of invites sent in the last `days` days'''
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property='invites_sent::day',
end_time__gte=timezone_now() - datetime.timedelta(days=days)
).aggregate(Sum('value'))['value__sum']
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
'''Discourage using invitation emails as a vector for carrying spam.'''
msg = _("You do not have enough remaining invites. "
"Please contact %s to have your limit raised. "
"No invitations were sent.") % (settings.ZULIP_ADMINISTRATOR,)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
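    # Each (days, count) pair below caps the total number of invites sent
    # across *all* new realms in the trailing window; e.g. with a
    # hypothetical setting of [(1, 100)], at most 100 invites could be
    # sent by new realms combined in any 24-hour period.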
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
def do_invite_users(user_profile: UserProfile,
invitee_emails: SizedTextIterable,
streams: Iterable[Stream],
invite_as_admin: Optional[bool]=False) -> None:
check_invite_limit(user_profile.realm, len(invitee_emails))
realm = user_profile.realm
if not realm.invite_required:
# Inhibit joining an open realm to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if (user_profile.date_joined > timezone_now() - min_age
and not user_profile.is_realm_admin):
raise InvitationError(
_("Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."),
[], sent_invitations=False)
validated_emails = [] # type: List[str]
errors = [] # type: List[Tuple[str, str]]
skipped = [] # type: List[Tuple[str, str]]
for email in invitee_emails:
if email == '':
continue
email_error, email_skipped = validate_email(user_profile, email)
if not (email_error or email_skipped):
validated_emails.append(email)
elif email_error:
errors.append((email, email_error))
elif email_skipped:
skipped.append((email, email_skipped))
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped, sent_invitations=False)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(_("We weren't able to invite anyone."),
skipped, sent_invitations=False)
# We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than for keeping track
    # of when exactly invitations were sent.
do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
None, timezone_now(), increment=len(validated_emails))
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
invited_as_admin=invite_as_admin,
realm=user_profile.realm)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(_("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"),
skipped, sent_invitations=True)
notify_invites_changed(user_profile)
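# Behavior summary (derived from the code above): invalid addresses abort the
# whole batch with nothing sent; addresses that merely already have accounts
# are reported back via InvitationError, but everyone else is still invited
# (sent_invitations=True in that case).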
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
days_to_activate = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', 7)
active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', 1)
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
prereg_users = PreregistrationUser.objects.exclude(status=active_value).filter(
invited_at__gte=lowest_datetime,
referred_by__realm=user_profile.realm)
invites = []
for invitee in prereg_users:
invites.append(dict(email=invitee.email,
ref=invitee.referred_by.email,
invited=datetime_to_timestamp(invitee.invited_at),
id=invitee.id,
invited_as_admin=invitee.invited_as_admin))
return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, streams: Optional[List[Stream]]=[]) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
return create_confirmation_link(invite, realm.host, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type,
object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
None, prereg_user.invited_at)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
name: str,
author: UserProfile,
image_file: File) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
emoji_uploaded_successfully = False
try:
upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.save(update_fields=['file_name'])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_set_alert_words(user_profile: UserProfile, alert_words: List[str]) -> None:
set_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, alert_words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str) -> None:
add_topic_mute(user_profile, stream.id, recipient.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
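# Illustrative usage (hypothetical pattern and URL): linkify "#1234"-style
# references to an external tracker, where the named group feeds the %(id)s
# placeholder in the URL format string:
#
#     do_add_realm_filter(realm, r'#(?P<id>[0-9]+)',
#                         'https://example.com/issues/%(id)s')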
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
id: Optional[int]=None) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
allow_subdomains=allow_subdomains)
event = dict(type="realm_domains", op="add",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=['allow_subdomains'])
event = dict(type="realm_domains", op="change",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, 'emails_restricted_to_domains', False)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Get streams with subscribers """
subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm,
user_profile__is_active=True).values('recipient_id')
stream_ids = Recipient.objects.filter(
type=Recipient.STREAM, id__in=subs_filter).values('type_id')
return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = [(row.to_dict()) for row in query]
return streams
def do_get_streams(user_profile: UserProfile, include_public: bool=True,
include_subscribed: bool=True, include_all_active: bool=False,
include_default: bool=False) -> List[Dict[str, Any]]:
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if not include_all_active:
user_subs = get_stream_subscriptions_for_user(user_profile).filter(
active=True,
).select_related('recipient')
if include_subscribed:
recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
if include_public:
invite_only_check = Q(invite_only=False)
if include_subscribed and include_public:
query = query.filter(recipient_check | invite_only_check)
elif include_public:
query = query.filter(invite_only_check)
elif include_subscribed:
query = query.filter(recipient_check)
else:
# We're including nothing, so don't bother hitting the DB.
query = []
streams = [(row.to_dict()) for row in query]
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
attachment_dict: Dict[str, Any]) -> None:
event = {
'type': 'attachment',
'op': op,
'attachment': attachment_dict,
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message) -> None:
attachment_url_list = attachment_url_re.findall(message.content)
for url in attachment_url_list:
path_id = attachment_url_to_path_id(url)
user_profile = message.sender
is_message_realm_public = False
if message.is_stream_message():
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if not validate_attachment_request(user_profile, path_id):
            # Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that
            #   actually exists.  validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have permission to
            #   access themselves.  validate_attachment_request will return False.
            #
            # Either case is unusual and suggests a UI bug that got
            # the user into this situation, so we log these cases.
logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
user_profile.id, path_id, message.id))
continue
attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
notify_attachment_update(user_profile, "update", attachment.to_dict())
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(prev_content: str, message: Message) -> None:
new_content = message.content
prev_attachments = set(attachment_url_re.findall(prev_content))
new_attachments = set(attachment_url_re.findall(new_content))
to_remove = list(prev_attachments - new_attachments)
path_ids = []
for url in to_remove:
path_id = attachment_url_to_path_id(url)
path_ids.append(path_id)
attachments_to_update = Attachment.objects.filter(path_id__in=path_ids).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message)
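# Sketch of the set arithmetic above (hypothetical uploads): if the old
# content referenced {a, b} and the new content references {b, c}, then
# to_remove == [a] (detached from the message) while c in to_add triggers a
# fresh do_claim_attachments() pass over the edited message.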
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields",
op=operation,
fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
hint: str='',
field_data: ProfileFieldData=None) -> CustomProfileField:
field = CustomProfileField(realm=realm, name=name, field_type=field_type)
field.hint = hint
if field.field_type == CustomProfileField.CHOICE:
field.field_data = ujson.dumps(field_data or {})
field.save()
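    # The first save assigns the field a database id, which then seeds its
    # default position below (order = field.id) before the second, narrow save.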
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
name: str, hint: str='',
field_data: ProfileFieldData=None) -> None:
field.name = name
field.hint = hint
if field.field_type == CustomProfileField.CHOICE:
field.field_data = ujson.dumps(field_data or {})
field.save()
notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
order_mapping = dict((_[1], _[0]) for _ in enumerate(order))
fields = CustomProfileField.objects.filter(realm=realm)
for field in fields:
if field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for field in fields:
field.order = order_mapping[field.id]
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'update')
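# Note on order_mapping above: dict((_[1], _[0]) for _ in enumerate(order))
# inverts enumerate(), mapping field id -> list position; e.g. order=[7, 3, 9]
# yields {7: 0, 3: 1, 9: 2}, so field 7 is stored with order 0, and so on.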
def notify_user_update_custom_profile_data(user_profile: UserProfile,
field: Dict[str, Union[int, str, List[int], None]]) -> None:
if field['type'] == CustomProfileField.USER:
field_value = ujson.dumps(field['value']) # type: Union[int, str, List[int], None]
else:
field_value = field['value']
payload = dict(user_id=user_profile.id, custom_profile_field=dict(id=field['id'],
value=field_value))
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data(user_profile: UserProfile,
data: List[Dict[str, Union[int, str, List[int]]]]) -> None:
with transaction.atomic():
for field in data:
field_value, created = CustomProfileFieldValue.objects.update_or_create(
user_profile=user_profile,
field_id=field['id'],
defaults={'value': field['value']})
notify_user_update_custom_profile_data(user_profile, {
"id": field_value.field_id,
"value": field_value.value,
"type": field_value.field.field_type})
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(type="user_group",
op="add",
group=dict(name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
description: str) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '%s' already exists." % (name,)))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
event = dict(type="user_group", op='update', group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=['name'])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '%s' already exists." % (name,)))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=['description'])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
service_interface: int,
service_payload_url: str) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=bot_profile.email,
user_id=bot_profile.id,
services = [dict(base_url=service.base_url,
interface=service.interface,
token=service.token,)],
),
),
bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=bot_profile.email,
user_id=bot_profile.id,
services = [dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: str) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts = [] # type: List[Dict[str, Any]]
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [{'config_data': get_bot_config(user_profile),
'service_name': services[0].name
}]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
bot_services_by_uid = defaultdict(list) # type: Dict[int, List[Service]]
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid = {} # type: Dict[int, List[Dict[str, Any]]]
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts = [] # type: List[Dict[str, Any]]
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif bot_type == UserProfile.EMBEDDED_BOT:
        if bot_profile_id in embedded_bot_configs:
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [{'config_data': bot_config,
'service_name': services[0].name
}]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
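# Batching note: unlike get_service_dicts_for_bot() above, this bulk variant
# fetches all Service rows and embedded-bot configs up front and assembles the
# per-bot service dicts in memory, avoiding one query per bot.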
def get_owned_bot_dicts(user_profile: UserProfile,
include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
bot_owner=user_profile).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'bot_type': botdict['bot_type'],
'is_active': botdict['is_active'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': avatar_url_from_dict(botdict),
'services': services_by_ids[botdict['id']],
}
for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
user_group: UserGroup,
user_ids: List[int]) -> None:
event = dict(type="user_group",
op=event_name,
group_id=user_group.id,
user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
memberships = [UserGroupMembership(user_group_id=user_group.id,
user_profile=user_profile)
for user_profile in user_profiles]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('add_members', user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id,
user_profile__in=user_profiles).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('remove_members', user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
realm_id: int) -> None:
event = dict(type="user_group",
op="remove",
group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def missing_any_realm_internal_bots() -> bool:
bot_emails = [bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,)
for bot in settings.REALM_INTERNAL_BOTS]
bot_counts = dict(UserProfile.objects.filter(email__in=bot_emails)
.values_list('email')
.annotate(Count('id')))
realm_count = Realm.objects.count()
return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)
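# How the check above works: each internal bot should exist once per realm, so
# if any bot email's UserProfile count falls short of the total number of
# realms, at least one realm is missing that bot.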
| [
"MutableMapping[str, Any]",
"Stream",
"int",
"Stream",
"UserProfile",
"Realm",
"UserProfile",
"UserProfile",
"str",
"UserProfile",
"UserProfile",
"UserProfile",
"UserProfile",
"Iterable[Stream]",
"UserProfile",
"UserProfile",
"UserProfile",
"Optional[Stream]",
"UserProfile",
"Realm",
"Iterable[Tuple[str, str]]",
"str",
"Optional[str]",
"Realm",
"str",
"str",
"UserProfile",
"UserProfile",
"Realm",
"Realm",
"str",
"Any",
"Realm",
"Dict[str, bool]",
"Realm",
"bool",
"int",
"bool",
"Realm",
"int",
"Realm",
"Stream",
"int",
"Realm",
"Stream",
"int",
"Realm",
"Realm",
"Realm",
"str",
"Realm",
"UserProfile",
"Stream",
"UserProfile",
"str",
"UserProfile",
"str",
"NonBinaryStr",
"NonBinaryStr",
"Realm",
"str",
"Callable[[str], str]",
"MutableMapping[str, Any]",
"Message",
"str",
"Set[int]",
"Realm",
"Recipient",
"int",
"Recipient",
"int",
"Optional[StreamTopicTarget]",
"Callable[[Dict[str, Any]], bool]",
"Dict[str, Any]",
"UserProfile",
"List[Tuple[int, int]]",
"Set[int]",
"Set[int]",
"int",
"int",
"int",
"Sequence[Mapping[str, Any]]",
"Sequence[Optional[MutableMapping[str, Any]]]",
"int",
"int",
"int",
"Message",
"Set[int]",
"Set[int]",
"Set[int]",
"Set[int]",
"Set[int]",
"List[UserMessageLite]",
"Realm",
"int",
"int",
"str",
"str",
"UserProfile",
"Message",
"Reaction",
"str",
"UserProfile",
"Message",
"str",
"UserProfile",
"Message",
"str",
"UserProfile",
"Message",
"str",
"str",
"str",
"UserProfile",
"Message",
"str",
"str",
"Realm",
"Dict[str, Any]",
"UserProfile",
"Sequence[str]",
"str",
"UserProfile",
"Sequence[str]",
"str",
"Stream",
"Stream",
"Stream",
"List[int]",
"Realm",
"bool",
"Optional[bool]",
"Realm",
"str",
"Realm",
"str",
"Realm",
"List[Mapping[str, Any]]",
"Set[int]",
"bool",
"Optional[UserProfile]",
"UserProfile",
"List[UserProfile]",
"UserProfile",
"Iterable[str]",
"bool",
"Optional[UserProfile]",
"UserProfile",
"List[UserProfile]",
"bool",
"Optional[UserProfile]",
"UserProfile",
"Message",
"Union[str, Iterable[str]]",
"UserProfile",
"Client",
"str",
"str",
"str",
"UserProfile",
"Client",
"UserProfile",
"str",
"UserProfile",
"Client",
"str",
"Sequence[str]",
"Optional[str]",
"str",
"UserProfile",
"Client",
"str",
"Sequence[str]",
"Optional[str]",
"str",
"str",
"datetime.datetime",
"str",
"str",
"UserProfile",
"Realm",
"str",
"UserProfile",
"Optional[Stream]",
"str",
"Realm",
"UserProfile",
"Stream",
"Optional[UserProfile]",
"UserProfile",
"Client",
"Addressee",
"str",
"Realm",
"UserProfile",
"Addressee",
"str",
"Realm",
"UserProfile",
"str",
"str",
"str",
"Realm",
"UserProfile",
"UserProfile",
"str",
"Realm",
"str",
"str",
"str",
"str",
"str",
"Realm",
"UserProfile",
"UserProfile",
"str",
"Realm",
"UserProfile",
"str",
"str",
"str",
"Realm",
"UserProfile",
"List[str]",
"str",
"UserProfile",
"Iterable[Subscription]",
"Optional[UserProfile]",
"Stream",
"Optional[UserProfile]",
"Mapping[str, Any]",
"Callable[[], bool]",
"Iterable[Mapping[str, Any]]",
"UserProfile",
"Mapping[int, bool]",
"StreamRecipientMap",
"Stream",
"Optional[UserProfile]",
"Stream",
"UserProfile",
"Iterable[Tuple[Subscription, Stream]]",
"Callable[[Stream], List[int]]",
"Dict[int, int]",
"Stream",
"Iterable[int]",
"Iterable[int]",
"Iterable[Stream]",
"Iterable[Stream]",
"Iterable[UserProfile]",
"Stream",
"UserProfile",
"Iterable[Stream]",
"Iterable[UserProfile]",
"Iterable[Stream]",
"Client",
"Stream",
"str",
"str",
"str",
"Any",
"UserProfile",
"Subscription",
"Stream",
"str",
"Any",
"UserProfile",
"str",
"UserProfile",
"str",
"Optional[UserProfile]",
"UserProfile",
"str",
"UserProfile",
"UserProfile",
"str",
"UserProfile",
"UserProfile",
"UserProfile",
"UserProfile",
"UserProfile",
"str",
"UserProfile",
"UserProfile",
"UserProfile",
"str",
"UserProfile",
"Realm",
"str",
"UserProfile",
"int",
"UserProfile",
"Optional[Stream]",
"UserProfile",
"Optional[Stream]",
"UserProfile",
"bool",
"UserProfile",
"bool",
"UserProfile",
"bool",
"Stream",
"bool",
"Stream",
"bool",
"Stream",
"bool",
"Stream",
"str",
"Stream",
"str",
"str",
"str",
"UserProfile",
"str",
"bool",
"UserProfile",
"bool",
"UserProfile",
"str",
"Union[bool, str]",
"List[str]",
"Realm",
"Realm",
"Dict[str, Dict[str, Any]]",
"Realm",
"Realm",
"Stream",
"Stream",
"Realm",
"str",
"str",
"List[Stream]",
"Realm",
"DefaultStreamGroup",
"List[Stream]",
"Realm",
"DefaultStreamGroup",
"List[Stream]",
"Realm",
"DefaultStreamGroup",
"str",
"Realm",
"DefaultStreamGroup",
"str",
"Realm",
"DefaultStreamGroup",
"int",
"UserProfile",
"List[Stream]",
"List[DefaultStreamGroup]",
"UserProfile",
"datetime.datetime",
"UserProfile",
"Client",
"str",
"datetime.datetime",
"UserProfile",
"UserPresence",
"Client",
"UserProfile",
"Client",
"datetime.datetime",
"int",
"UserProfile",
"datetime.datetime",
"UserProfile",
"Client",
"datetime.datetime",
"int",
"bool",
"UserProfile",
"Client",
"int",
"UserProfile",
"Client",
"UserProfile",
"Client",
"Stream",
"UserProfile",
"List[int]",
"UserProfile",
"Client",
"str",
"str",
"List[int]",
"UserProfile",
"int",
"str",
"int",
"str",
"str",
"str",
"int",
"Message",
"Iterable[UserMessage]",
"UserMessage",
"bool",
"int",
"List[Message]",
"UserProfile",
"Message",
"Optional[str]",
"Optional[str]",
"UserMessage",
"UserProfile",
"Message",
"Optional[str]",
"str",
"Optional[str]",
"Optional[str]",
"Set[int]",
"Set[int]",
"UserMessage",
"UserProfile",
"Message",
"UserProfile",
"Set[int]",
"int",
"int",
"datetime.datetime",
"Dict[int, int]",
"datetime.datetime",
"Stream",
"str",
"str",
"str",
"str",
"Realm",
"UserProfile",
"UserProfile",
"Realm",
"int",
"str",
"Set[int]",
"Dict[int, List[str]]",
"Set[int]",
"UserProfile",
"PreregistrationUser",
"UserProfile",
"str",
"Realm",
"str",
"UserProfile",
"str",
"str",
"List[Tuple[str, str]]",
"bool",
"Iterable[Realm]",
"int",
"Realm",
"int",
"UserProfile",
"SizedTextIterable",
"Iterable[Stream]",
"UserProfile",
"UserProfile",
"PreregistrationUser",
"PreregistrationUser",
"Realm",
"Realm",
"str",
"UserProfile",
"File",
"Realm",
"str",
"UserProfile",
"Iterable[str]",
"UserProfile",
"Iterable[str]",
"UserProfile",
"Iterable[str]",
"UserProfile",
"List[str]",
"UserProfile",
"Stream",
"Recipient",
"str",
"UserProfile",
"Stream",
"str",
"UserProfile",
"str",
"Realm",
"Realm",
"str",
"str",
"Realm",
"Sequence[int]",
"Realm",
"str",
"bool",
"RealmDomain",
"bool",
"RealmDomain",
"Realm",
"Realm",
"UserProfile",
"UserProfile",
"str",
"Dict[str, Any]",
"Message",
"int",
"str",
"Message",
"Realm",
"str",
"Realm",
"str",
"int",
"Realm",
"CustomProfileField",
"Realm",
"Realm",
"CustomProfileField",
"str",
"Realm",
"List[int]",
"UserProfile",
"Dict[str, Union[int, str, List[int], None]]",
"UserProfile",
"List[Dict[str, Union[int, str, List[int]]]]",
"UserGroup",
"List[UserProfile]",
"Realm",
"str",
"List[UserProfile]",
"str",
"UserGroup",
"Dict[str, Any]",
"UserGroup",
"str",
"UserGroup",
"str",
"UserProfile",
"int",
"str",
"UserProfile",
"Dict[str, str]",
"str",
"List[Dict[str, Any]]",
"Realm",
"UserProfile",
"str",
"UserGroup",
"List[int]",
"UserGroup",
"List[UserProfile]",
"UserGroup",
"List[UserProfile]",
"Realm",
"int",
"int",
"int",
"UserProfile"
] | [
6848,
7464,
7994,
8276,
8645,
9170,
9322,
9439,
9493,
9536,
11324,
11550,
12031,
12053,
13818,
17174,
18026,
18086,
19374,
19541,
19559,
19856,
19871,
19893,
19911,
19947,
22370,
23325,
24286,
24435,
24448,
24460,
25061,
25132,
25680,
25743,
25818,
25887,
26777,
26856,
27323,
27338,
27357,
27710,
27725,
27789,
28142,
29225,
29510,
29532,
29653,
30406,
32279,
34152,
34176,
34954,
34978,
35789,
35903,
36133,
36147,
36204,
36803,
37434,
37480,
37523,
37568,
38250,
38272,
39481,
39526,
39568,
43122,
43388,
45201,
45234,
45304,
45331,
45384,
45694,
45709,
47542,
48767,
60074,
60091,
60103,
60363,
60419,
60479,
60536,
60594,
60649,
63082,
63743,
63783,
63822,
63859,
63895,
64509,
64531,
64577,
64591,
65981,
66003,
66024,
66452,
66474,
66495,
66826,
66848,
66889,
66906,
66926,
67256,
67278,
67322,
67342,
67746,
67767,
68802,
68832,
68892,
69262,
69292,
69347,
70007,
70262,
70670,
70688,
70917,
70945,
70990,
71661,
71709,
73047,
73085,
73419,
73469,
74390,
74459,
74521,
74584,
75548,
75612,
76494,
76536,
76591,
76647,
77025,
77071,
77133,
77196,
77516,
78377,
79172,
79193,
79214,
79256,
79267,
79481,
79502,
79557,
79576,
79880,
79901,
79928,
79968,
79995,
80050,
80881,
80902,
80956,
80973,
81027,
81059,
81106,
81123,
81791,
82273,
82886,
82957,
83024,
84302,
84351,
84410,
84450,
85491,
85552,
85624,
87048,
87069,
87088,
87138,
90916,
90958,
91009,
91056,
92082,
92097,
92156,
92168,
92215,
92566,
92615,
92678,
92734,
93079,
93100,
93126,
93169,
93186,
93200,
94033,
94082,
94145,
94201,
94411,
94426,
94452,
94497,
94511,
94724,
94739,
94760,
94813,
95123,
95142,
95594,
95665,
96440,
96523,
96612,
98615,
98691,
98747,
98818,
101166,
101191,
102039,
102405,
102460,
102547,
102625,
104402,
104468,
104544,
105673,
107047,
107099,
112816,
116009,
116031,
116716,
116778,
116841,
120526,
122442,
122460,
122475,
122524,
122794,
122812,
122870,
122893,
122905,
123523,
123546,
124009,
124033,
124075,
125070,
125098,
125143,
125599,
125627,
125676,
126194,
126218,
126268,
128219,
128245,
128714,
128740,
129597,
129625,
131046,
131200,
131220,
131860,
131884,
132910,
132931,
133898,
133964,
134967,
134987,
135840,
135860,
136665,
136685,
137113,
137134,
137641,
137664,
137817,
137847,
138003,
138021,
140303,
140328,
140721,
140732,
142535,
142554,
142566,
143594,
143620,
143778,
143837,
143889,
145013,
145064,
145565,
145585,
146459,
146717,
146988,
147312,
147552,
147571,
147624,
147638,
148458,
148472,
148544,
149303,
149317,
149394,
149842,
149856,
149932,
150411,
150425,
150509,
150663,
150677,
150810,
151030,
151381,
151560,
151749,
151809,
153162,
153211,
153254,
153297,
153685,
153708,
154059,
154491,
154540,
154586,
154641,
157028,
157051,
157278,
157299,
157317,
157369,
157390,
157746,
157767,
157806,
159229,
159250,
160204,
160261,
160313,
161407,
161480,
161981,
162030,
162077,
162116,
162159,
164153,
164177,
164407,
164424,
164449,
164626,
164727,
165001,
165941,
165955,
166210,
166235,
166247,
166978,
167620,
167670,
167716,
167777,
168501,
168846,
168868,
168889,
168942,
168956,
169011,
169050,
169100,
174106,
174380,
174402,
175142,
175365,
175955,
176115,
176141,
176214,
176868,
177038,
177165,
177183,
178094,
178614,
179396,
180821,
187355,
188108,
188164,
188221,
188281,
188341,
189165,
189762,
190985,
191047,
191694,
191884,
191898,
192527,
192547,
193400,
193413,
193454,
193666,
193692,
194114,
194135,
195671,
195720,
195768,
198924,
199780,
200127,
200772,
201430,
201634,
201673,
201712,
201763,
202491,
202504,
202739,
202759,
202938,
202964,
203133,
203159,
203328,
203354,
203509,
203530,
203549,
203567,
203824,
203845,
203860,
204106,
204128,
204354,
204923,
204939,
204963,
205320,
205658,
205849,
205864,
205887,
206384,
206415,
206864,
207559,
208082,
208302,
210083,
210100,
210151,
210379,
211831,
212098,
212112,
212801,
212819,
213124,
213137,
213154,
213724,
213738,
214025,
214151,
214165,
214233,
214618,
214632,
215134,
215197,
215816,
215874,
216463,
216483,
216966,
216979,
217001,
217058,
217412,
217429,
217659,
217676,
218012,
218036,
218274,
218345,
218410,
219300,
219356,
219970,
220912,
220972,
222602,
223899,
223960,
224025,
224316,
224377,
224827,
224888,
225218,
225240,
225291,
225512,
225531
] | [
6872,
7470,
7997,
8282,
8656,
9175,
9333,
9450,
9496,
9547,
11335,
11561,
12042,
12069,
13829,
17185,
18037,
18102,
19385,
19546,
19584,
19859,
19884,
19898,
19914,
19950,
22381,
23336,
24291,
24440,
24451,
24463,
25066,
25147,
25685,
25747,
25821,
25891,
26782,
26859,
27328,
27344,
27360,
27715,
27731,
27792,
28147,
29230,
29515,
29535,
29658,
30417,
32285,
34163,
34179,
34965,
34981,
35801,
35915,
36138,
36150,
36224,
36827,
37441,
37483,
37531,
37573,
38259,
38275,
39490,
39529,
39595,
43154,
43402,
45212,
45255,
45312,
45339,
45387,
45697,
45712,
47569,
48811,
60077,
60094,
60106,
60370,
60427,
60487,
60544,
60602,
60657,
63103,
63748,
63786,
63825,
63862,
63898,
64520,
64538,
64585,
64594,
65992,
66010,
66027,
66463,
66481,
66498,
66837,
66855,
66892,
66909,
66929,
67267,
67285,
67325,
67345,
67751,
67781,
68813,
68845,
68895,
69273,
69305,
69350,
70013,
70268,
70676,
70697,
70922,
70949,
71004,
71666,
71712,
73052,
73088,
73424,
73492,
74398,
74463,
74542,
74595,
75565,
75623,
76507,
76540,
76612,
76658,
77042,
77075,
77154,
77207,
77523,
78402,
79183,
79199,
79217,
79259,
79270,
79492,
79508,
79568,
79579,
79891,
79907,
79931,
79981,
80008,
80053,
80892,
80908,
80959,
80986,
81040,
81062,
81109,
81140,
81794,
82276,
82897,
82962,
83027,
84313,
84367,
84413,
84455,
85502,
85558,
85645,
87059,
87075,
87097,
87141,
90921,
90969,
91018,
91059,
92087,
92108,
92159,
92171,
92218,
92571,
92626,
92689,
92737,
93084,
93103,
93129,
93172,
93189,
93203,
94038,
94093,
94156,
94204,
94416,
94437,
94455,
94500,
94514,
94729,
94750,
94769,
94816,
95134,
95164,
95615,
95671,
96461,
96540,
96630,
98642,
98702,
98765,
98836,
101172,
101212,
102045,
102416,
102497,
102576,
102639,
104408,
104481,
104557,
105689,
107063,
107120,
112822,
116020,
116047,
116737,
116794,
116847,
120532,
122445,
122463,
122478,
122527,
122805,
122824,
122876,
122896,
122908,
123534,
123549,
124020,
124036,
124096,
125081,
125101,
125154,
125610,
125630,
125687,
126205,
126229,
126279,
128230,
128248,
128725,
128751,
129608,
129628,
131057,
131205,
131223,
131871,
131887,
132921,
132947,
133909,
133980,
134978,
134991,
135851,
135864,
136676,
136689,
137119,
137138,
137647,
137668,
137823,
137851,
138009,
138024,
140309,
140331,
140724,
140735,
142546,
142557,
142570,
143605,
143624,
143789,
143840,
143905,
145022,
145069,
145570,
145610,
146464,
146722,
146994,
147318,
147557,
147574,
147627,
147650,
148463,
148490,
148556,
149308,
149335,
149406,
149847,
149874,
149935,
150416,
150443,
150512,
150668,
150695,
150813,
151041,
151393,
151584,
151760,
151826,
153173,
153217,
153257,
153314,
153696,
153720,
154065,
154502,
154546,
154603,
154644,
157039,
157068,
157289,
157305,
157334,
157372,
157394,
157757,
157773,
157809,
159240,
159256,
160215,
160267,
160319,
161418,
161489,
161992,
162036,
162080,
162119,
162168,
164164,
164180,
164410,
164427,
164452,
164629,
164730,
165004,
165948,
165976,
166221,
166239,
166250,
166991,
167631,
167677,
167729,
167790,
168512,
168857,
168875,
168902,
168945,
168969,
169024,
169058,
169108,
174117,
174391,
174409,
175153,
175373,
175958,
176118,
176158,
176228,
176885,
177044,
177168,
177186,
178097,
178617,
179401,
180832,
187366,
188113,
188167,
188224,
188289,
188361,
189173,
189773,
191004,
191058,
191697,
191889,
191901,
192538,
192550,
193403,
193434,
193458,
193681,
193695,
194119,
194138,
195682,
195737,
195784,
198935,
199791,
200146,
200791,
201435,
201639,
201676,
201723,
201767,
202496,
202507,
202750,
202772,
202949,
202977,
203144,
203172,
203339,
203363,
203520,
203536,
203558,
203570,
203835,
203851,
203863,
204117,
204131,
204359,
204928,
204942,
204966,
205325,
205671,
205854,
205867,
205891,
206395,
206419,
206875,
207564,
208087,
208313,
210094,
210103,
210165,
210386,
211834,
212101,
212119,
212806,
212822,
213129,
213140,
213157,
213729,
213756,
214030,
214156,
214183,
214236,
214623,
214641,
215145,
215240,
215827,
215917,
216472,
216500,
216971,
216982,
217018,
217061,
217421,
217443,
217668,
217679,
218021,
218039,
218285,
218348,
218413,
219311,
219370,
219973,
220932,
220977,
222613,
223902,
223969,
224034,
224325,
224394,
224836,
224905,
225223,
225243,
225294,
225515,
225542
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/addressee.py |
from typing import Iterable, List, Optional, Sequence
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from zerver.lib.exceptions import JsonableError
from zerver.models import (
Realm,
UserProfile,
get_user_including_cross_realm,
)
def raw_pm_with_emails(email_str: str, my_email: str) -> List[str]:
frags = email_str.split(',')
emails = [s.strip().lower() for s in frags]
emails = [email for email in emails if email]
if len(emails) > 1:
emails = [email for email in emails if email != my_email.lower()]
return emails
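# Example (hypothetical addresses): with my_email='me@example.com',
# raw_pm_with_emails('Me@Example.com, you@example.com,', 'me@example.com')
# returns ['you@example.com'] -- addresses are lowercased and stripped, and
# the sender's own address is dropped only when other recipients remain.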
def user_profiles_from_unvalidated_emails(emails: Iterable[str], realm: Realm) -> List[UserProfile]:
user_profiles = [] # type: List[UserProfile]
for email in emails:
try:
user_profile = get_user_including_cross_realm(email, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_("Invalid email '%s'") % (email,))
user_profiles.append(user_profile)
return user_profiles
def get_user_profiles(emails: Iterable[str], realm: Realm) -> List[UserProfile]:
try:
return user_profiles_from_unvalidated_emails(emails, realm)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
class Addressee:
# This is really just a holder for vars that tended to be passed
# around in a non-type-safe way before this class was introduced.
#
# It also avoids some nonsense where you have to think about whether
# topic should be None or '' for a PM, or you have to make an array
# of one stream.
#
# Eventually we can use this to cache Stream and UserProfile objects
# in memory.
#
# This should be treated as an immutable class.
def __init__(self, msg_type: str,
user_profiles: Optional[Sequence[UserProfile]]=None,
stream_name: Optional[str]=None,
topic: Optional[str]=None) -> None:
assert(msg_type in ['stream', 'private'])
self._msg_type = msg_type
self._user_profiles = user_profiles
self._stream_name = stream_name
self._topic = topic
def is_stream(self) -> bool:
return self._msg_type == 'stream'
def is_private(self) -> bool:
return self._msg_type == 'private'
def user_profiles(self) -> List[UserProfile]:
assert(self.is_private())
return self._user_profiles # type: ignore # assertion protects us
def stream_name(self) -> str:
assert(self.is_stream())
assert(self._stream_name is not None)
return self._stream_name
def topic(self) -> str:
assert(self.is_stream())
assert(self._topic is not None)
return self._topic
@staticmethod
def legacy_build(sender: UserProfile,
message_type_name: str,
message_to: Sequence[str],
topic_name: str,
realm: Optional[Realm]=None) -> 'Addressee':
        # For legacy reasons, message_to used to be either a list of
# emails or a list of streams. We haven't fixed all of our
# callers yet.
if realm is None:
realm = sender.realm
if message_type_name == 'stream':
if len(message_to) > 1:
raise JsonableError(_("Cannot send to multiple streams"))
if message_to:
stream_name = message_to[0]
else:
# This is a hack to deal with the fact that we still support
# default streams (and the None will be converted later in the
# callpath).
if sender.default_sending_stream:
                # Use the user's default stream
stream_name = sender.default_sending_stream.name
else:
raise JsonableError(_('Missing stream'))
return Addressee.for_stream(stream_name, topic_name)
elif message_type_name == 'private':
emails = message_to
return Addressee.for_private(emails, realm)
else:
raise JsonableError(_("Invalid message type"))
@staticmethod
def for_stream(stream_name: str, topic: str) -> 'Addressee':
if topic is None:
raise JsonableError(_("Missing topic"))
topic = topic.strip()
if topic == "":
raise JsonableError(_("Topic can't be empty"))
return Addressee(
msg_type='stream',
stream_name=stream_name,
topic=topic,
)
@staticmethod
def for_private(emails: Sequence[str], realm: Realm) -> 'Addressee':
user_profiles = get_user_profiles(emails, realm)
return Addressee(
msg_type='private',
user_profiles=user_profiles,
)
@staticmethod
def for_user_profile(user_profile: UserProfile) -> 'Addressee':
user_profiles = [user_profile]
return Addressee(
msg_type='private',
user_profiles=user_profiles,
)
| [
"str",
"str",
"Iterable[str]",
"Realm",
"Iterable[str]",
"Realm",
"str",
"UserProfile",
"str",
"Sequence[str]",
"str",
"str",
"str",
"Sequence[str]",
"Realm",
"UserProfile"
] | [
380,
395,
714,
736,
1135,
1157,
1901,
2913,
2966,
3004,
3052,
4345,
4357,
4745,
4767,
5014
] | [
383,
398,
727,
741,
1148,
1162,
1904,
2924,
2969,
3017,
3055,
4348,
4360,
4758,
4772,
5025
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/alert_words.py |
from django.db.models import Q
from zerver.models import UserProfile, Realm
from zerver.lib.cache import cache_with_key, realm_alert_words_cache_key
import ujson
from typing import Dict, Iterable, List
@cache_with_key(realm_alert_words_cache_key, timeout=3600*24)
def alert_words_in_realm(realm: Realm) -> Dict[int, List[str]]:
users_query = UserProfile.objects.filter(realm=realm, is_active=True)
alert_word_data = users_query.filter(~Q(alert_words=ujson.dumps([]))).values('id', 'alert_words')
all_user_words = dict((elt['id'], ujson.loads(elt['alert_words'])) for elt in alert_word_data)
user_ids_with_words = dict((user_id, w) for (user_id, w) in all_user_words.items() if len(w))
return user_ids_with_words
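# The result maps user id -> that user's alert words, omitting users whose
# list is empty; the realm-wide dict is cached for 24 hours under
# realm_alert_words_cache_key.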
def user_alert_words(user_profile: UserProfile) -> List[str]:
return ujson.loads(user_profile.alert_words)
def add_user_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> List[str]:
words = user_alert_words(user_profile)
new_words = [w for w in alert_words if w not in words]
words.extend(new_words)
set_user_alert_words(user_profile, words)
return words
def remove_user_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> List[str]:
words = user_alert_words(user_profile)
words = [w for w in words if w not in alert_words]
set_user_alert_words(user_profile, words)
return words
def set_user_alert_words(user_profile: UserProfile, alert_words: List[str]) -> None:
user_profile.alert_words = ujson.dumps(alert_words)
user_profile.save(update_fields=['alert_words'])
| [
"Realm",
"UserProfile",
"UserProfile",
"Iterable[str]",
"UserProfile",
"Iterable[str]",
"UserProfile",
"List[str]"
] | [
298,
770,
886,
912,
1180,
1206,
1438,
1464
] | [
303,
781,
897,
925,
1191,
1219,
1449,
1473
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/api_test_helpers.py | from typing import Dict, Any, Optional, Iterable
from io import StringIO
import json
import os
from zerver.lib import mdiff
from zerver.lib.openapi import validate_against_openapi_schema
if False:
from zulip import Client
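# (The `if False:` guard above makes this a typing-only import: Client is
# referenced in the comment-style annotations below but is never imported at
# runtime.)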
ZULIP_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
FIXTURE_PATH = os.path.join(ZULIP_DIR, 'templates', 'zerver', 'api', 'fixtures.json')
def load_api_fixtures():
# type: () -> Dict[str, Any]
with open(FIXTURE_PATH, 'r') as fp:
json_dict = json.loads(fp.read())
return json_dict
FIXTURES = load_api_fixtures()
def add_subscriptions(client):
# type: (Client) -> None
# {code_example|start}
# Subscribe to the stream "new stream"
result = client.add_subscriptions(
streams=[
{
'name': 'new stream',
'description': 'New stream for testing'
}
]
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'200_without_principals')
# {code_example|start}
# To subscribe another user to a stream, you may pass in
# the `principals` argument, like so:
result = client.add_subscriptions(
streams=[
{'name': 'new stream', 'description': 'New stream for testing'}
],
principals=['newbie@zulip.com']
)
# {code_example|end}
assert result['result'] == 'success'
assert 'newbie@zulip.com' in result['subscribed']
def test_add_subscriptions_already_subscribed(client):
# type: (Client) -> None
result = client.add_subscriptions(
streams=[
{'name': 'new stream', 'description': 'New stream for testing'}
],
principals=['newbie@zulip.com']
)
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'200_already_subscribed')
def test_authorization_errors_fatal(client, nonadmin_client):
# type: (Client, Client) -> None
client.add_subscriptions(
streams=[
{'name': 'private_stream'}
],
)
stream_id = client.get_stream_id('private_stream')['stream_id']
client.call_endpoint(
'streams/{}'.format(stream_id),
method='PATCH',
request={'is_private': True}
)
result = nonadmin_client.add_subscriptions(
streams=[
{'name': 'private_stream'}
],
authorization_errors_fatal=False,
)
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'400_unauthorized_errors_fatal_false')
result = nonadmin_client.add_subscriptions(
streams=[
{'name': 'private_stream'}
],
authorization_errors_fatal=True,
)
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'400_unauthorized_errors_fatal_true')
def get_user_presence(client):
# type: (Client) -> None
# {code_example|start}
# Get presence information for "iago@zulip.com"
result = client.get_user_presence('iago@zulip.com')
# {code_example|end}
validate_against_openapi_schema(result, '/users/{email}/presence', 'get', '200')
def create_user(client):
# type: (Client) -> None
# {code_example|start}
# Create a user
request = {
'email': 'newbie@zulip.com',
'password': 'temp',
'full_name': 'New User',
'short_name': 'newbie'
}
result = client.create_user(request)
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'post', '200')
# Test "Email already used error"
result = client.create_user(request)
validate_against_openapi_schema(result, '/users', 'post', '400')
def get_members(client):
# type: (Client) -> None
# {code_example|start}
# Get all users in the realm
result = client.get_members()
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'get', '200')
members = [m for m in result['members'] if m['email'] == 'newbie@zulip.com']
assert len(members) == 1
newbie = members[0]
assert not newbie['is_admin']
assert newbie['full_name'] == 'New User'
# {code_example|start}
# You may pass the `client_gravatar` query parameter as follows:
result = client.get_members({'client_gravatar': True})
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'get', '200')
assert result['members'][0]['avatar_url'] is None
def get_realm_filters(client):
# type: (Client) -> None
# {code_example|start}
# Fetch all the filters in this organization
result = client.get_realm_filters()
# {code_example|end}
validate_against_openapi_schema(result, '/realm/filters', 'get', '200')
def add_realm_filter(client):
# type: (Client) -> None
# {code_example|start}
# Add a filter to automatically linkify #<number> to the corresponding
# issue in Zulip's server repo
result = client.add_realm_filter('#(?P<id>[0-9]+)',
'https://github.com/zulip/zulip/issues/%(id)s')
# {code_example|end}
validate_against_openapi_schema(result, '/realm/filters', 'post', '200')
def remove_realm_filter(client):
# type: (Client) -> None
# {code_example|start}
# Remove the organization filter with ID 42
result = client.remove_realm_filter(42)
# {code_example|end}
validate_against_openapi_schema(result, '/realm/filters/<filter_id>', 'delete', '200')
def get_profile(client):
# type: (Client) -> None
# {code_example|start}
# Get the profile of the user/bot that requests this endpoint,
# which is `client` in this case:
result = client.get_profile()
# {code_example|end}
fixture = FIXTURES['get-profile']
check_if_equal = ['email', 'full_name', 'msg', 'result', 'short_name']
check_if_exists = ['client_id', 'is_admin', 'is_bot', 'max_message_id',
'pointer', 'user_id']
test_against_fixture(result, fixture, check_if_equal=check_if_equal,
check_if_exists=check_if_exists)
def get_stream_id(client):
# type: (Client) -> None
# {code_example|start}
# Get the ID of a given stream
stream_name = 'new stream'
result = client.get_stream_id(stream_name)
# {code_example|end}
validate_against_openapi_schema(result, '/get_stream_id', 'get', '200')
def get_streams(client):
# type: (Client) -> None
# {code_example|start}
# Get all streams that the user has access to
result = client.get_streams()
# {code_example|end}
validate_against_openapi_schema(result, '/streams', 'get', '200')
streams = [s for s in result['streams'] if s['name'] == 'new stream']
assert streams[0]['description'] == 'New stream for testing'
# {code_example|start}
# You may pass in one or more of the query parameters mentioned above
# as keyword arguments, like so:
result = client.get_streams(include_public=False)
# {code_example|end}
validate_against_openapi_schema(result, '/streams', 'get', '200')
assert len(result['streams']) == 4
def get_user_groups(client):
# type: (Client) -> None
# {code_example|start}
# Get all user groups of the realm
result = client.get_user_groups()
# {code_example|end}
validate_against_openapi_schema(result, '/user_groups', 'get', '200')
user_groups = [u for u in result['user_groups'] if u['name'] == "hamletcharacters"]
assert user_groups[0]['description'] == 'Characters of Hamlet'
def test_user_not_authorized_error(nonadmin_client):
# type: (Client) -> None
result = nonadmin_client.get_streams(include_all_active=True)
fixture = FIXTURES['user-not-authorized-error']
test_against_fixture(result, fixture)
def get_subscribers(client):
# type: (Client) -> None
result = client.get_subscribers(stream='new stream')
assert result['subscribers'] == ['iago@zulip.com', 'newbie@zulip.com']
def get_user_agent(client):
# type: (Client) -> None
result = client.get_user_agent()
assert result.startswith('ZulipPython/')
def list_subscriptions(client):
# type: (Client) -> None
# {code_example|start}
# Get all streams that the user is subscribed to
result = client.list_subscriptions()
# {code_example|end}
fixture = FIXTURES['get-subscribed-streams']
test_against_fixture(result, fixture, check_if_equal=['msg', 'result'],
check_if_exists=['subscriptions'])
streams = [s for s in result['subscriptions'] if s['name'] == 'new stream']
assert streams[0]['description'] == 'New stream for testing'
def remove_subscriptions(client):
# type: (Client) -> None
# {code_example|start}
# Unsubscribe from the stream "new stream"
result = client.remove_subscriptions(
['new stream']
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions',
'delete', '200')
# test it was actually removed
result = client.list_subscriptions()
assert result['result'] == 'success'
streams = [s for s in result['subscriptions'] if s['name'] == 'new stream']
assert len(streams) == 0
# {code_example|start}
# Unsubscribe another user from the stream "new stream"
result = client.remove_subscriptions(
['new stream'],
principals=['newbie@zulip.com']
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions',
'delete', '200')
def toggle_mute_topic(client):
# type: (Client) -> None
# Send a test message
message = {
'type': 'stream',
'to': 'Denmark',
'topic': 'boat party'
}
client.call_endpoint(
url='messages',
method='POST',
request=message
)
# {code_example|start}
# Mute the topic "boat party" in the stream "Denmark"
request = {
'stream': 'Denmark',
'topic': 'boat party',
'op': 'add'
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result,
'/users/me/subscriptions/muted_topics',
'patch', '200')
# {code_example|start}
# Unmute the topic "boat party" in the stream "Denmark"
request = {
'stream': 'Denmark',
'topic': 'boat party',
'op': 'remove'
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result,
'/users/me/subscriptions/muted_topics',
'patch', '200')
def mark_all_as_read(client):
# type: (Client) -> None
# {code_example|start}
# Mark all of the user's unread messages as read
result = client.mark_all_as_read()
# {code_example|end}
validate_against_openapi_schema(result, '/mark_all_as_read', 'post', '200')
def mark_stream_as_read(client):
# type: (Client) -> None
# {code_example|start}
# Mark the unread messages in stream with ID "1" as read
result = client.mark_stream_as_read(1)
# {code_example|end}
validate_against_openapi_schema(result, '/mark_stream_as_read', 'post', '200')
def mark_topic_as_read(client):
# type: (Client) -> None
# Grab an existing topic name
    topic_name = client.get_stream_topics(1)['topics'][0]['name']
# {code_example|start}
# Mark the unread messages in stream 1's topic "topic_name" as read
    result = client.mark_topic_as_read(1, topic_name)
# {code_example|end}
validate_against_openapi_schema(result, '/mark_stream_as_read', 'post', '200')
def update_subscription_settings(client):
# type: (Client) -> None
# {code_example|start}
# Update the user's subscription in stream #1 to pin it to the top of the
# stream list; and in stream #3 to have the hex color "f00"
request = [{
'stream_id': 1,
'property': 'pin_to_top',
'value': True
}, {
'stream_id': 3,
'property': 'color',
'value': 'f00'
}]
result = client.update_subscription_settings(request)
# {code_example|end}
validate_against_openapi_schema(result,
'/users/me/subscriptions/properties',
'POST', '200')
def render_message(client):
# type: (Client) -> None
# {code_example|start}
# Render a message
request = {
'content': '**foo**'
}
result = client.render_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/render', 'post', '200')
def get_messages(client):
# type: (Client) -> None
# {code_example|start}
# Get the last 3 messages sent by "iago@zulip.com" to the stream "Verona"
request = {
'use_first_unread_anchor': True,
'num_before': 3,
'num_after': 0,
'narrow': [{'operator': 'sender', 'operand': 'iago@zulip.com'},
{'operator': 'stream', 'operand': 'Verona'}],
'client_gravatar': True,
'apply_markdown': True
} # type: Dict[str, Any]
result = client.get_messages(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages', 'get', '200')
assert len(result['messages']) <= request['num_before']
def get_raw_message(client, message_id):
# type: (Client, int) -> None
assert int(message_id)
# {code_example|start}
# Get the raw content of the message with ID "message_id"
result = client.get_raw_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}', 'get',
'200')
def send_message(client):
# type: (Client) -> int
# {code_example|start}
# Send a stream message
request = {
"type": "stream",
"to": "Denmark",
"subject": "Castle",
"content": "I come not, friends, to steal away your hearts."
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages', 'post', '200')
# test that the message was actually sent
message_id = result['id']
url = 'messages/' + str(message_id)
result = client.call_endpoint(
url=url,
method='GET'
)
assert result['result'] == 'success'
assert result['raw_content'] == request['content']
# {code_example|start}
# Send a private message
request = {
"type": "private",
"to": "iago@zulip.com",
"content": "With mirth and laughter let old wrinkles come."
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages', 'post', '200')
# test that the message was actually sent
message_id = result['id']
url = 'messages/' + str(message_id)
result = client.call_endpoint(
url=url,
method='GET'
)
assert result['result'] == 'success'
assert result['raw_content'] == request['content']
return message_id
def test_nonexistent_stream_error(client):
# type: (Client) -> None
request = {
"type": "stream",
"to": "nonexistent_stream",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts."
}
result = client.send_message(request)
validate_against_openapi_schema(result, '/messages', 'post',
'400_non_existing_stream')
def test_private_message_invalid_recipient(client):
# type: (Client) -> None
request = {
"type": "private",
"to": "eeshan@zulip.com",
"content": "With mirth and laughter let old wrinkles come."
}
result = client.send_message(request)
validate_against_openapi_schema(result, '/messages', 'post',
'400_non_existing_user')
def update_message(client, message_id):
# type: (Client, int) -> None
assert int(message_id)
# {code_example|start}
# Edit a message
# (make sure that message_id below is set to the ID of the
# message you wish to update)
request = {
"message_id": message_id,
"content": "New content"
}
result = client.update_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}', 'patch',
'200')
# test it was actually updated
url = 'messages/' + str(message_id)
result = client.call_endpoint(
url=url,
method='GET'
)
assert result['result'] == 'success'
assert result['raw_content'] == request['content']
def test_update_message_edit_permission_error(client, nonadmin_client):
# type: (Client, Client) -> None
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts."
}
result = client.send_message(request)
request = {
"message_id": result["id"],
"content": "New content"
}
result = nonadmin_client.update_message(request)
fixture = FIXTURES['update-message-edit-permission-error']
test_against_fixture(result, fixture)
def delete_message(client, message_id):
# type: (Client, int) -> None
# {code_example|start}
# Delete the message with ID "message_id"
result = client.delete_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}', 'delete',
'200')
def test_delete_message_edit_permission_error(client, nonadmin_client):
# type: (Client, Client) -> None
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts."
}
result = client.send_message(request)
result = nonadmin_client.delete_message(result['id'])
validate_against_openapi_schema(result, '/messages/{message_id}', 'delete',
'400_not_admin')
def get_message_history(client, message_id):
# type: (Client, int) -> None
# {code_example|start}
# Get the edit history for message with ID "message_id"
result = client.get_message_history(message_id)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}/history',
'get', '200')
def get_realm_emoji(client):
# type: (Client) -> None
# {code_example|start}
result = client.get_realm_emoji()
# {code_example|end}
validate_against_openapi_schema(result, '/realm/emoji', 'GET', '200')
def update_message_flags(client):
# type: (Client) -> None
# Send a few test messages
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts."
} # type: Dict[str, Any]
message_ids = []
for i in range(0, 3):
message_ids.append(client.send_message(request)['id'])
# {code_example|start}
# Add the "read" flag to the messages with IDs in "message_ids"
request = {
'messages': message_ids,
'op': 'add',
'flag': 'read'
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/flags', 'post',
'200')
# {code_example|start}
# Remove the "starred" flag from the messages with IDs in "message_ids"
request = {
'messages': message_ids,
'op': 'remove',
'flag': 'starred'
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/flags', 'post',
'200')
def register_queue(client):
# type: (Client) -> str
# {code_example|start}
# Register the queue
result = client.register(
event_types=['message', 'realm_emoji']
)
# {code_example|end}
validate_against_openapi_schema(result, '/register', 'post', '200')
return result['queue_id']
def deregister_queue(client, queue_id):
# type: (Client, str) -> None
# {code_example|start}
# Delete a queue (queue_id is the ID of the queue
# to be removed)
result = client.deregister(queue_id)
# {code_example|end}
validate_against_openapi_schema(result, '/events', 'delete', '200')
# Test "BAD_EVENT_QUEUE_ID" error
result = client.deregister(queue_id)
validate_against_openapi_schema(result, '/events', 'delete', '400')
def get_server_settings(client):
# type: (Client) -> None
# {code_example|start}
# Fetch the settings for this server
result = client.get_server_settings()
# {code_example|end}
validate_against_openapi_schema(result, '/server_settings', 'get', '200')
def upload_file(client):
# type: (Client) -> None
fp = StringIO("zulip")
fp.name = "zulip.txt"
# {code_example|start}
# Upload a file
# (Make sure that 'fp' is a file object)
result = client.call_endpoint(
'user_uploads',
method='POST',
files=[fp]
)
# {code_example|end}
validate_against_openapi_schema(result, '/user_uploads', 'post', '200')
def get_stream_topics(client, stream_id):
# type: (Client, int) -> None
# {code_example|start}
result = client.get_stream_topics(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/{stream_id}/topics',
'get', '200')
def set_typing_status(client):
# type: (Client) -> None
# {code_example|start}
# The user has started to type in the group PM with Iago and Polonius
request = {
'op': 'start',
'to': ['iago@zulip.com', 'polonius@zulip.com']
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, '/typing', 'post', '200')
# {code_example|start}
# The user has finished typing in the group PM with Iago and Polonius
request = {
'op': 'stop',
'to': ['iago@zulip.com', 'polonius@zulip.com']
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, '/typing', 'post', '200')
def test_invalid_api_key(client_with_invalid_key):
# type: (Client) -> None
result = client_with_invalid_key.list_subscriptions()
fixture = FIXTURES['invalid-api-key']
test_against_fixture(result, fixture)
def test_missing_request_argument(client):
# type: (Client) -> None
result = client.render_message({})
fixture = FIXTURES['missing-request-argument-error']
test_against_fixture(result, fixture)
def test_invalid_stream_error(client):
# type: (Client) -> None
result = client.get_stream_id('nonexistent')
validate_against_openapi_schema(result, '/get_stream_id', 'get', '400')
TEST_FUNCTIONS = {
'/mark_all_as_read:post': mark_all_as_read,
'/mark_stream_as_read:post': mark_stream_as_read,
'/mark_topic_as_read:post': mark_topic_as_read,
'/messages/render:post': render_message,
'/messages:get': get_messages,
'/messages:post': send_message,
'/messages/{message_id}:get': get_raw_message,
'/messages/{message_id}:patch': update_message,
'/messages/{message_id}:delete': delete_message,
'/messages/{message_id}/history:get': get_message_history,
'/messages/flags:post': update_message_flags,
'/get_stream_id:get': get_stream_id,
'get-subscribed-streams': list_subscriptions,
'/streams:get': get_streams,
'/users:post': create_user,
'get-profile': get_profile,
'add-subscriptions': add_subscriptions,
'/users/{email}/presence:get': get_user_presence,
'/users/me/subscriptions:delete': remove_subscriptions,
'/users/me/subscriptions/muted_topics:patch': toggle_mute_topic,
'/users/me/subscriptions/properties:post': update_subscription_settings,
'/users:get': get_members,
'/realm/emoji:get': get_realm_emoji,
'/realm/filters:get': get_realm_filters,
'/realm/filters:post': add_realm_filter,
'/realm/filters/<filter_id>:delete': remove_realm_filter,
'/register:post': register_queue,
'/events:delete': deregister_queue,
'/server_settings:get': get_server_settings,
'/user_uploads:post': upload_file,
'/users/me/{stream_id}/topics:get': get_stream_topics,
'/typing:post': set_typing_status,
'/user_groups:get': get_user_groups,
}
# SETUP METHODS FOLLOW
def test_against_fixture(result, fixture, check_if_equal=[], check_if_exists=[]):
# type: (Dict[str, Any], Dict[str, Any], Optional[Iterable[str]], Optional[Iterable[str]]) -> None
assertLength(result, fixture)
if not check_if_equal and not check_if_exists:
for key, value in fixture.items():
assertEqual(key, result, fixture)
if check_if_equal:
for key in check_if_equal:
assertEqual(key, result, fixture)
if check_if_exists:
for key in check_if_exists:
assertIn(key, result)
def assertEqual(key, result, fixture):
# type: (str, Dict[str, Any], Dict[str, Any]) -> None
if result[key] != fixture[key]:
first = "{key} = {value}".format(key=key, value=result[key])
second = "{key} = {value}".format(key=key, value=fixture[key])
raise AssertionError("Actual and expected outputs do not match; showing diff:\n" +
mdiff.diff_strings(first, second))
else:
assert result[key] == fixture[key]
def assertLength(result, fixture):
# type: (Dict[str, Any], Dict[str, Any]) -> None
if len(result) != len(fixture):
result_string = json.dumps(result, indent=4, sort_keys=True)
fixture_string = json.dumps(fixture, indent=4, sort_keys=True)
raise AssertionError("The lengths of the actual and expected outputs do not match; showing diff:\n" +
mdiff.diff_strings(result_string, fixture_string))
else:
assert len(result) == len(fixture)
def assertIn(key, result):
# type: (str, Dict[str, Any]) -> None
if key not in result.keys():
raise AssertionError(
"The actual output does not contain the the key `{key}`.".format(key=key)
)
else:
assert key in result
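# Editorial sketch (not part of the original test suite) showing how the
# helpers above compose: test_against_fixture checks lengths first, then
# compares every key, only the keys in check_if_equal, and/or the mere
# presence of the keys in check_if_exists. The data below is hypothetical.
def _example_fixture_check():
    # type: () -> None
    result = {'result': 'success', 'msg': '', 'queue_id': 'abc123'}
    fixture = {'result': 'success', 'msg': '', 'queue_id': 'ignored'}
    # Compare 'result' and 'msg' exactly; only require that 'queue_id' exists.
    test_against_fixture(result, fixture,
                         check_if_equal=['result', 'msg'],
                         check_if_exists=['queue_id'])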
def test_messages(client, nonadmin_client):
# type: (Client, Client) -> None
render_message(client)
message_id = send_message(client)
update_message(client, message_id)
get_raw_message(client, message_id)
get_messages(client)
get_message_history(client, message_id)
delete_message(client, message_id)
mark_all_as_read(client)
mark_stream_as_read(client)
mark_topic_as_read(client)
update_message_flags(client)
test_nonexistent_stream_error(client)
test_private_message_invalid_recipient(client)
test_update_message_edit_permission_error(client, nonadmin_client)
test_delete_message_edit_permission_error(client, nonadmin_client)
def test_users(client):
# type: (Client) -> None
create_user(client)
get_members(client)
get_profile(client)
upload_file(client)
set_typing_status(client)
get_user_presence(client)
get_user_groups(client)
def test_streams(client, nonadmin_client):
# type: (Client, Client) -> None
add_subscriptions(client)
test_add_subscriptions_already_subscribed(client)
list_subscriptions(client)
get_stream_id(client)
get_streams(client)
get_subscribers(client)
remove_subscriptions(client)
toggle_mute_topic(client)
update_subscription_settings(client)
get_stream_topics(client, 1)
test_user_not_authorized_error(nonadmin_client)
test_authorization_errors_fatal(client, nonadmin_client)
def test_queues(client):
# type: (Client) -> None
# Note that the example for api/get-events-from-queue is not tested here.
# Methods such as client.get_events() and client.call_on_each_message()
# are blocking calls, and the event queue backend is already thoroughly
# tested in zerver/tests/test_event_queue.py, so it is not worth the
# effort to come up with asynchronous logic for testing them here.
queue_id = register_queue(client)
deregister_queue(client, queue_id)
def test_server_organizations(client):
# type: (Client) -> None
get_realm_filters(client)
add_realm_filter(client)
get_server_settings(client)
remove_realm_filter(client)
get_realm_emoji(client)
def test_errors(client):
# type: (Client) -> None
test_missing_request_argument(client)
test_invalid_stream_error(client)
def test_the_api(client, nonadmin_client):
# type: (Client, Client) -> None
get_user_agent(client)
test_users(client)
test_streams(client, nonadmin_client)
test_messages(client, nonadmin_client)
test_queues(client)
test_server_organizations(client)
test_errors(client)
| [] | [] | [] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/attachments.py |
from django.utils.translation import ugettext as _
from typing import Any, Dict, List
from zerver.lib.request import JsonableError
from zerver.lib.upload import delete_message_image
from zerver.models import Attachment, UserProfile
def user_attachments(user_profile: UserProfile) -> List[Dict[str, Any]]:
attachments = Attachment.objects.filter(owner=user_profile).prefetch_related('messages')
return [a.to_dict() for a in attachments]
def access_attachment_by_id(user_profile: UserProfile, attachment_id: int,
needs_owner: bool=False) -> Attachment:
query = Attachment.objects.filter(id=attachment_id)
if needs_owner:
query = query.filter(owner=user_profile)
attachment = query.first()
if attachment is None:
raise JsonableError(_("Invalid attachment"))
return attachment
def remove_attachment(user_profile: UserProfile, attachment: Attachment) -> None:
try:
delete_message_image(attachment.path_id)
except Exception:
raise JsonableError(_("An error occurred while deleting the attachment. Please try again later."))
attachment.delete()
| [
"UserProfile",
"UserProfile",
"int",
"UserProfile",
"Attachment"
] | [
270,
490,
518,
887,
912
] | [
281,
501,
521,
898,
922
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/avatar.py | from django.conf import settings
if False:
from zerver.models import UserProfile
from typing import Any, Dict, Optional
from zerver.lib.avatar_hash import gravatar_hash, user_avatar_path_from_ids
from zerver.lib.upload import upload_backend, MEDIUM_AVATAR_SIZE
from zerver.models import UserProfile
import urllib
def avatar_url(user_profile: UserProfile, medium: bool=False, client_gravatar: bool=False) -> Optional[str]:
return get_avatar_field(
user_id=user_profile.id,
realm_id=user_profile.realm_id,
email=user_profile.email,
avatar_source=user_profile.avatar_source,
avatar_version=user_profile.avatar_version,
medium=medium,
client_gravatar=client_gravatar,
)
def avatar_url_from_dict(userdict: Dict[str, Any], medium: bool=False) -> str:
'''
DEPRECATED: We should start using
get_avatar_field to populate users,
particularly for codepaths where the
client can compute gravatar URLs
on the client side.
'''
url = _get_unversioned_avatar_url(
userdict['id'],
userdict['avatar_source'],
userdict['realm_id'],
email=userdict['email'],
medium=medium)
url += '&version=%d' % (userdict['avatar_version'],)
return url
def get_avatar_field(user_id: int,
realm_id: int,
email: str,
avatar_source: str,
avatar_version: int,
medium: bool,
client_gravatar: bool) -> Optional[str]:
'''
Most of the parameters to this function map to fields
by the same name in UserProfile (avatar_source, realm_id,
email, etc.).
Then there are these:
medium - This means we want a medium-sized avatar. This can
affect the "s" parameter for gravatar avatars, or it
can give us something like foo-medium.png for
user-uploaded avatars.
client_gravatar - If the client can compute their own
gravatars, this will be set to True, and we'll avoid
computing them on the server (mostly to save bandwidth).
'''
if client_gravatar:
'''
If our client knows how to calculate gravatar hashes, we
will return None and let the client compute the gravatar
url.
'''
if settings.ENABLE_GRAVATAR:
if avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
return None
'''
If we get this far, we'll compute an avatar URL that may be
either user-uploaded or a gravatar, and then we'll add version
info to try to avoid stale caches.
'''
url = _get_unversioned_avatar_url(
user_profile_id=user_id,
avatar_source=avatar_source,
realm_id=realm_id,
email=email,
medium=medium,
)
url += '&version=%d' % (avatar_version,)
return url
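# A hedged usage sketch (editorial addition, not upstream code): with
# client_gravatar=True and a gravatar-sourced avatar, get_avatar_field
# returns None (assuming settings.ENABLE_GRAVATAR), so the client computes
# the gravatar URL itself; the IDs and email below are hypothetical.
def _example_get_avatar_field() -> Optional[str]:
    return get_avatar_field(
        user_id=1,
        realm_id=1,
        email='iago@zulip.com',
        avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
        avatar_version=1,
        medium=False,
        client_gravatar=True,
    )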
def get_gravatar_url(email: str, avatar_version: int, medium: bool=False) -> str:
url = _get_unversioned_gravatar_url(email, medium)
url += '&version=%d' % (avatar_version,)
return url
def _get_unversioned_gravatar_url(email: str, medium: bool) -> str:
if settings.ENABLE_GRAVATAR:
gravatar_query_suffix = "&s=%s" % (MEDIUM_AVATAR_SIZE,) if medium else ""
hash_key = gravatar_hash(email)
return "https://secure.gravatar.com/avatar/%s?d=identicon%s" % (hash_key, gravatar_query_suffix)
return settings.DEFAULT_AVATAR_URI+'?x=x'
def _get_unversioned_avatar_url(user_profile_id: int,
avatar_source: str,
realm_id: int,
email: Optional[str]=None,
medium: bool=False) -> str:
if avatar_source == 'U':
hash_key = user_avatar_path_from_ids(user_profile_id, realm_id)
return upload_backend.get_avatar_url(hash_key, medium=medium)
assert email is not None
return _get_unversioned_gravatar_url(email, medium)
def absolute_avatar_url(user_profile: UserProfile) -> str:
"""
Absolute URLs are used to simplify logic for applications that
won't be served by browsers, such as rendering GCM notifications.
"""
avatar = avatar_url(user_profile)
# avatar_url can return None if client_gravatar=True; however, here we use the default value of False
assert avatar is not None
return urllib.parse.urljoin(user_profile.realm.uri, avatar)
| [
"UserProfile",
"Dict[str, Any]",
"int",
"int",
"str",
"str",
"int",
"bool",
"bool",
"str",
"int",
"str",
"bool",
"int",
"str",
"int",
"UserProfile"
] | [
350,
775,
1350,
1386,
1419,
1460,
1502,
1536,
1580,
2994,
3015,
3205,
3218,
3588,
3640,
3687,
4106
] | [
361,
789,
1353,
1389,
1422,
1463,
1505,
1540,
1584,
2997,
3018,
3208,
3222,
3591,
3643,
3690,
4117
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/avatar_hash.py |
from django.conf import settings
from zerver.lib.utils import make_safe_digest
from zerver.models import UserProfile
import hashlib
def gravatar_hash(email: str) -> str:
"""Compute the Gravatar hash for an email address."""
# Non-ASCII characters aren't permitted by the currently active e-mail
# RFCs. However, the IETF has published https://tools.ietf.org/html/rfc4952,
# outlining internationalization of email addresses; regardless, if we typo
# an address or someone manages to give us a non-ASCII address, let's not
# error out on it.
return make_safe_digest(email.lower(), hashlib.md5)
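# Worked example (an editorial sketch, not upstream code): since the address
# is lowercased before hashing, differently-cased spellings of the same
# address produce the same Gravatar hash.
def _example_gravatar_hash() -> str:
    assert gravatar_hash('IAGO@zulip.com') == gravatar_hash('iago@zulip.com')
    return gravatar_hash('iago@zulip.com')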
def user_avatar_hash(uid: str) -> str:
# WARNING: If this method is changed, you may need to do a migration
# similar to zerver/migrations/0060_move_avatars_to_be_uid_based.py .
# The salt probably doesn't serve any purpose now. In the past we
# used a hash of the email address, not the user ID, and we salted
# it in order to make the hashing scheme different from Gravatar's.
user_key = uid + settings.AVATAR_SALT
return make_safe_digest(user_key, hashlib.sha1)
def user_avatar_path(user_profile: UserProfile) -> str:
# WARNING: If this method is changed, you may need to do a migration
# similar to zerver/migrations/0060_move_avatars_to_be_uid_based.py .
return user_avatar_path_from_ids(user_profile.id, user_profile.realm_id)
def user_avatar_path_from_ids(user_profile_id: int, realm_id: int) -> str:
user_id_hash = user_avatar_hash(str(user_profile_id))
return '%s/%s' % (str(realm_id), user_id_hash)
| [
"str",
"str",
"UserProfile",
"int",
"int"
] | [
162,
656,
1162,
1456,
1471
] | [
165,
659,
1173,
1459,
1474
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bot_config.py | from django.conf import settings
from django.db.models import Sum
from django.db.models.query import F
from django.db.models.functions import Length
from zerver.models import BotConfigData, UserProfile
from typing import List, Dict, Optional
from collections import defaultdict
import os
import configparser
import importlib
class ConfigError(Exception):
pass
def get_bot_config(bot_profile: UserProfile) -> Dict[str, str]:
entries = BotConfigData.objects.filter(bot_profile=bot_profile)
if not entries:
raise ConfigError("No config data available.")
return {entry.key: entry.value for entry in entries}
def get_bot_configs(bot_profile_ids: List[int]) -> Dict[int, Dict[str, str]]:
if not bot_profile_ids:
return {}
entries = BotConfigData.objects.filter(bot_profile_id__in=bot_profile_ids)
entries_by_uid = defaultdict(dict) # type: Dict[int, Dict[str, str]]
for entry in entries:
entries_by_uid[entry.bot_profile_id].update({entry.key: entry.value})
return entries_by_uid
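# A hedged usage sketch (editorial addition): the defaultdict above folds the
# flat BotConfigData rows into one {key: value} dict per bot profile, so a
# caller gets e.g. {42: {'api_key': '...'}, 43: {'token': '...'}} back. The
# bot profile IDs below are hypothetical.
def _example_get_bot_configs() -> None:
    configs = get_bot_configs([42, 43])
    for bot_profile_id, config in configs.items():
        print(bot_profile_id, sorted(config.keys()))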
def get_bot_config_size(bot_profile: UserProfile, key: Optional[str]=None) -> int:
if key is None:
return BotConfigData.objects.filter(bot_profile=bot_profile) \
.annotate(key_size=Length('key'), value_size=Length('value')) \
.aggregate(sum=Sum(F('key_size')+F('value_size')))['sum'] or 0
else:
try:
return len(key) + len(BotConfigData.objects.get(bot_profile=bot_profile, key=key).value)
except BotConfigData.DoesNotExist:
return 0
def set_bot_config(bot_profile: UserProfile, key: str, value: str) -> None:
config_size_limit = settings.BOT_CONFIG_SIZE_LIMIT
old_entry_size = get_bot_config_size(bot_profile, key)
new_entry_size = len(key) + len(value)
old_config_size = get_bot_config_size(bot_profile)
new_config_size = old_config_size + (new_entry_size - old_entry_size)
if new_config_size > config_size_limit:
raise ConfigError("Cannot store configuration. Request would require {} characters. "
"The current configuration size limit is {} characters.".format(new_config_size,
config_size_limit))
obj, created = BotConfigData.objects.get_or_create(bot_profile=bot_profile, key=key,
defaults={'value': value})
if not created:
obj.value = value
obj.save()
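# Editorial note with a worked example of the quota arithmetic above: with a
# (hypothetical) limit of 100, an existing entry 'color' -> 'blue' occupies
# len('color') + len('blue') = 9 characters; replacing it with
# 'color' -> 'turquoise' changes the total by
# len('color') + len('turquoise') - 9 = +5 characters, and ConfigError is
# raised only if the new total exceeds the limit.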
def load_bot_config_template(bot: str) -> Dict[str, str]:
bot_module_name = 'zulip_bots.bots.{}'.format(bot)
bot_module = importlib.import_module(bot_module_name)
bot_module_path = os.path.dirname(bot_module.__file__)
config_path = os.path.join(bot_module_path, '{}.conf'.format(bot))
if os.path.isfile(config_path):
config = configparser.ConfigParser()
with open(config_path) as conf:
config.readfp(conf) # type: ignore # readfp->read_file in python 3, so not in stubs
return dict(config.items(bot))
else:
return dict()
| [
"UserProfile",
"List[int]",
"UserProfile",
"UserProfile",
"str",
"str",
"str"
] | [
402,
672,
1080,
1637,
1655,
1667,
2593
] | [
413,
681,
1091,
1648,
1658,
1670,
2596
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bot_lib.py | import json
import logging
import os
import signal
import sys
import time
import re
import importlib
from zerver.lib.actions import internal_send_private_message, \
internal_send_stream_message, internal_send_huddle_message
from zerver.models import UserProfile, get_active_user
from zerver.lib.bot_storage import get_bot_storage, set_bot_storage, \
is_key_in_bot_storage, get_bot_storage_size, remove_bot_storage
from zerver.lib.bot_config import get_bot_config, ConfigError
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.topic import get_topic_from_message_info
import configparser
if False:
from mypy_extensions import NoReturn
from typing import Any, Optional, List, Dict
from types import ModuleType
our_dir = os.path.dirname(os.path.abspath(__file__))
from zulip_bots.lib import RateLimit
def get_bot_handler(service_name: str) -> Any:
# Check that this service is present in EMBEDDED_BOTS, add exception handling.
is_present_in_registry = any(service_name == embedded_bot_service.name for
embedded_bot_service in EMBEDDED_BOTS)
if not is_present_in_registry:
return None
bot_module_name = 'zulip_bots.bots.%s.%s' % (service_name, service_name)
bot_module = importlib.import_module(bot_module_name) # type: Any
return bot_module.handler_class()
class StateHandler:
storage_size_limit = 10000000 # type: int # TODO: Store this in the server configuration model.
def __init__(self, user_profile: UserProfile) -> None:
self.user_profile = user_profile
self.marshal = lambda obj: json.dumps(obj)
self.demarshal = lambda obj: json.loads(obj)
def get(self, key: str) -> str:
return self.demarshal(get_bot_storage(self.user_profile, key))
def put(self, key: str, value: str) -> None:
set_bot_storage(self.user_profile, [(key, self.marshal(value))])
def remove(self, key: str) -> None:
remove_bot_storage(self.user_profile, [key])
def contains(self, key: str) -> bool:
return is_key_in_bot_storage(self.user_profile, key)
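# A minimal usage sketch (editorial addition, not upstream code) of the JSON
# marshalling in StateHandler: values round-trip through json.dumps/json.loads
# between put() and get().
def _example_state_handler(user_profile: UserProfile) -> None:
    storage = StateHandler(user_profile)
    storage.put('greeting', 'hello')  # persisted as the JSON string '"hello"'
    assert storage.get('greeting') == 'hello'
    assert storage.contains('greeting')
    storage.remove('greeting')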
class EmbeddedBotQuitException(Exception):
pass
class EmbeddedBotHandler:
def __init__(self, user_profile: UserProfile) -> None:
# Only expose a subset of our UserProfile's functionality
self.user_profile = user_profile
self._rate_limit = RateLimit(20, 5)
self.full_name = user_profile.full_name
self.email = user_profile.email
self.storage = StateHandler(user_profile)
def send_message(self, message: Dict[str, Any]) -> None:
if not self._rate_limit.is_legal():
self._rate_limit.show_error_and_exit()
if message['type'] == 'stream':
internal_send_stream_message(self.user_profile.realm, self.user_profile, message['to'],
message['topic'], message['content'])
return
assert message['type'] == 'private'
# Ensure that it's a comma-separated list, even though the
# usual 'to' field could be either a List[str] or a str.
recipients = ','.join(message['to']).split(',')
if len(message['to']) == 1:
recipient_user = get_active_user(recipients[0], self.user_profile.realm)
internal_send_private_message(self.user_profile.realm, self.user_profile,
recipient_user, message['content'])
else:
internal_send_huddle_message(self.user_profile.realm, self.user_profile,
recipients, message['content'])
def send_reply(self, message: Dict[str, Any], response: str) -> None:
if message['type'] == 'private':
self.send_message(dict(
type='private',
to=[x['email'] for x in message['display_recipient']],
content=response,
sender_email=message['sender_email'],
))
else:
self.send_message(dict(
type='stream',
to=message['display_recipient'],
topic=get_topic_from_message_info(message),
content=response,
sender_email=message['sender_email'],
))
# The bot_name argument exists only to comply with ExternalBotHandler.get_config_info().
def get_config_info(self, bot_name: str, optional: bool=False) -> Dict[str, str]:
try:
return get_bot_config(self.user_profile)
except ConfigError:
if optional:
return dict()
raise
def quit(self, message: str= "") -> None:
raise EmbeddedBotQuitException(message)
| [
"str",
"UserProfile",
"str",
"str",
"str",
"str",
"str",
"UserProfile",
"Dict[str, Any]",
"Dict[str, Any]",
"str",
"str"
] | [
865,
1516,
1707,
1815,
1827,
1941,
2037,
2229,
2577,
3662,
3688,
4412
] | [
868,
1527,
1710,
1818,
1830,
1944,
2040,
2240,
2591,
3676,
3691,
4415
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bot_storage.py | from django.conf import settings
from django.db.models import Sum
from django.db.models.query import F
from django.db.models.functions import Length
from zerver.models import BotStorageData, UserProfile
from typing import Optional, List, Tuple
class StateError(Exception):
pass
def get_bot_storage(bot_profile: UserProfile, key: str) -> str:
try:
return BotStorageData.objects.get(bot_profile=bot_profile, key=key).value
except BotStorageData.DoesNotExist:
raise StateError("Key does not exist.")
def get_bot_storage_size(bot_profile: UserProfile, key: Optional[str]=None) -> int:
if key is None:
return BotStorageData.objects.filter(bot_profile=bot_profile) \
.annotate(key_size=Length('key'), value_size=Length('value')) \
.aggregate(sum=Sum(F('key_size')+F('value_size')))['sum'] or 0
else:
try:
return len(key) + len(BotStorageData.objects.get(bot_profile=bot_profile, key=key).value)
except BotStorageData.DoesNotExist:
return 0
def set_bot_storage(bot_profile: UserProfile, entries: List[Tuple[str, str]]) -> None:
storage_size_limit = settings.USER_STATE_SIZE_LIMIT
storage_size_difference = 0
for key, value in entries:
if type(key) is not str:
raise StateError("Key type is {}, but should be str.".format(type(key)))
if type(value) is not str:
raise StateError("Value type is {}, but should be str.".format(type(value)))
storage_size_difference += (len(key) + len(value)) - get_bot_storage_size(bot_profile, key)
new_storage_size = get_bot_storage_size(bot_profile) + storage_size_difference
if new_storage_size > storage_size_limit:
raise StateError("Request exceeds storage limit by {} characters. The limit is {} characters."
.format(new_storage_size - storage_size_limit, storage_size_limit))
else:
for key, value in entries:
BotStorageData.objects.update_or_create(bot_profile=bot_profile, key=key,
defaults={'value': value})
def remove_bot_storage(bot_profile: UserProfile, keys: List[str]) -> None:
queryset = BotStorageData.objects.filter(bot_profile=bot_profile, key__in=keys)
if len(queryset) < len(keys):
raise StateError("Key does not exist.")
queryset.delete()
def is_key_in_bot_storage(bot_profile: UserProfile, key: str) -> bool:
return BotStorageData.objects.filter(bot_profile=bot_profile, key=key).exists()
def get_keys_in_bot_storage(bot_profile: UserProfile) -> List[str]:
return list(BotStorageData.objects.filter(bot_profile=bot_profile).values_list('key', flat=True))
| [
"UserProfile",
"str",
"UserProfile",
"UserProfile",
"List[Tuple[str, str]]",
"UserProfile",
"List[str]",
"UserProfile",
"str",
"UserProfile"
] | [
326,
344,
575,
1138,
1160,
2225,
2244,
2492,
2510,
2650
] | [
337,
347,
586,
1149,
1181,
2236,
2253,
2503,
2513,
2661
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/__init__.py | # Zulip's main markdown implementation. See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
from typing import (Any, Callable, Dict, Iterable, List, NamedTuple,
Optional, Set, Tuple, TypeVar, Union, cast)
from mypy_extensions import TypedDict
from typing.re import Match, Pattern
import markdown
import logging
import traceback
import urllib
import re
import os
import html
import platform
import time
import functools
import ujson
import xml.etree.cElementTree as etree
from xml.etree.cElementTree import Element, SubElement
from collections import deque, defaultdict
import requests
from django.core import mail
from django.conf import settings
from django.db.models import Q
from markdown.extensions import codehilite, nl2br, tables
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import translate_emoticons, emoticon_regex
from zerver.lib.mention import possible_mentions, \
possible_user_group_mentions, extract_user_group
from zerver.lib.url_encoding import encode_stream
from zerver.lib.thumbnail import is_thumbor_enabled, user_uploads_or_external
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, NotFoundInCache
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
all_realm_filters,
get_active_streams,
MAX_MESSAGE_LENGTH,
Message,
Realm,
RealmFilter,
realm_filters_for_realm,
UserProfile,
UserGroup,
UserGroupMembership,
)
import zerver.lib.mention as mention
from zerver.lib.tex import render_tex
from zerver.lib.exceptions import BugdownRenderingException
FullNameInfo = TypedDict('FullNameInfo', {
'id': int,
'email': str,
'full_name': str,
})
DbData = Dict[str, Any]
# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1
_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
def verbose_compile(pattern: str) -> Any:
return re.compile(
"^(.*?)%s(.*?)$" % pattern,
re.DOTALL | re.UNICODE | re.VERBOSE
)
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""
LINK_REGEX = None # type: Pattern
def get_web_link_regex() -> str:
# We create this one time, but not at startup. So the
# first message rendered in any process will have some
# extra costs.
global LINK_REGEX
if LINK_REGEX is None:
# NOTE: this is a very expensive step; it reads a file of TLDs!
tlds = '|'.join(list_of_tlds())
# A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
#
# We detect a url either by the `https?://` or by building around the TLD.
# In lieu of having a recursive regex (which python doesn't support) to match
# arbitrary numbers of nested matching parentheses, we manually build a regexp
# that can match up to six levels of nesting.
# The inner_paren_contents chunk matches the innermost non-parenthesis-holding
# text, and the paren_group matches text with, optionally, a matching set of parens
inner_paren_contents = r"[^\s()\"]*"
paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
nested_paren_chunk = paren_group
for i in range(6):
nested_paren_chunk = nested_paren_chunk % (paren_group,)
nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
regex = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:%s) # TLDs (filled in via format from tlds-alpha-by-domain.txt)
)
)
(?:/ # A path, beginning with /
%s # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
%s # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
| (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
""" % (tlds, nested_paren_chunk, file_links)
LINK_REGEX = verbose_compile(regex)
return LINK_REGEX
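# Illustrative sketch (editorial addition, not upstream code): thanks to the
# nested-paren chunks above, trailing parentheses that belong to the URL are
# kept inside the match, e.g.:
#
#     m = get_web_link_regex().match('see https://en.wikipedia.org/wiki/Zulip_(software) now')
#     # m.group('url') == 'https://en.wikipedia.org/wiki/Zulip_(software)'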
def clear_state_for_testing() -> None:
# The link regex never changes in production, but our tests
# try out both sides of ENABLE_FILE_LINKS, so we need
# a way to clear it.
global LINK_REGEX
LINK_REGEX = None
bugdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
""" If the link points to a local destination we can just switch to that
instead of opening a new tab. """
if db_data:
realm_uri_prefix = db_data['realm_uri'] + "/"
if link.startswith(realm_uri_prefix):
# +1 to skip the `/` before the hash link.
return link[len(realm_uri_prefix):]
return link
def url_embed_preview_enabled_for_realm(message: Optional[Message]=None,
realm: Optional[Realm]=None) -> bool:
if not settings.INLINE_URL_EMBED_PREVIEW:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_url_embed_preview
def image_preview_enabled_for_realm(message: Optional[Message]=None,
realm: Optional[Realm]=None) -> bool:
if not settings.INLINE_IMAGE_PREVIEW:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_image_preview
def list_of_tlds() -> List[str]:
# HACK we manually blacklist a few domains
blacklist = ['PY\n', "MD\n"]
# tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')
if tld not in blacklist and not tld[0].startswith('#')]
tlds.sort(key=len, reverse=True)
return tlds
def walk_tree(root: Element,
processor: Callable[[Element], Optional[_T]],
stop_after_first: bool=False) -> List[_T]:
results = []
queue = deque([root])
while queue:
currElement = queue.popleft()
for child in currElement.getchildren():
if child.getchildren():
queue.append(child)
result = processor(child)
if result is not None:
results.append(result)
if stop_after_first:
return results
return results
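# A hedged usage sketch (editorial addition): collect every <a> element in a
# rendered tree by passing a processor that returns matching elements and
# None otherwise; walk_tree performs a breadth-first traversal.
def _example_collect_links(root: Element) -> List[Element]:
    return walk_tree(root, lambda e: e if e.tag == 'a' else None)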
ElementFamily = NamedTuple('ElementFamily', [
('grandparent', Optional[Element]),
('parent', Element),
('child', Element)
])
ResultWithFamily = NamedTuple('ResultWithFamily', [
('family', ElementFamily),
('result', Any)
])
ElementPair = NamedTuple('ElementPair', [
('parent', Optional[Element]),
('value', Element)
])
def walk_tree_with_family(root: Element,
processor: Callable[[Element], Optional[_T]]
) -> List[ResultWithFamily]:
results = []
queue = deque([ElementPair(parent=None, value=root)])
while queue:
currElementPair = queue.popleft()
for child in currElementPair.value.getchildren():
if child.getchildren():
queue.append(ElementPair(parent=currElementPair, value=child)) # type: ignore # Lack of Deque support in typing module for Python 3.4.3
result = processor(child)
if result is not None:
if currElementPair.parent is not None:
grandparent_element = cast(ElementPair, currElementPair.parent)
grandparent = grandparent_element.value
else:
grandparent = None
family = ElementFamily(
grandparent=grandparent,
parent=currElementPair.value,
child=child
)
results.append(ResultWithFamily(
family=family,
result=result
))
return results
# height is not actually used
def add_a(
root: Element,
url: str,
link: str,
title: Optional[str]=None,
desc: Optional[str]=None,
class_attr: str="message_inline_image",
data_id: Optional[str]=None,
insertion_index: Optional[int]=None,
already_thumbnailed: Optional[bool]=False
) -> None:
title = title if title is not None else url_filename(link)
title = title if title else ""
desc = desc if desc is not None else ""
if insertion_index is not None:
div = markdown.util.etree.Element("div")
root.insert(insertion_index, div)
else:
div = markdown.util.etree.SubElement(root, "div")
div.set("class", class_attr)
a = markdown.util.etree.SubElement(div, "a")
a.set("href", link)
a.set("target", "_blank")
a.set("title", title)
if data_id is not None:
a.set("data-id", data_id)
img = markdown.util.etree.SubElement(a, "img")
if is_thumbor_enabled() and (not already_thumbnailed) and user_uploads_or_external(url):
# See docs/thumbnailing.md for some high-level documentation.
#
# We strip leading '/' from relative URLs here to ensure
# consistency in what gets passed to /thumbnail
url = url.lstrip('/')
img.set("src", "/thumbnail?url={0}&size=thumbnail".format(
urllib.parse.quote(url, safe='')
))
img.set('data-src-fullsize', "/thumbnail?url={0}&size=full".format(
urllib.parse.quote(url, safe='')
))
else:
img.set("src", url)
if class_attr == "message_inline_ref":
summary_div = markdown.util.etree.SubElement(div, "div")
title_div = markdown.util.etree.SubElement(summary_div, "div")
title_div.set("class", "message_inline_image_title")
title_div.text = title
desc_div = markdown.util.etree.SubElement(summary_div, "desc")
desc_div.set("class", "message_inline_image_desc")
def add_embed(root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
container = markdown.util.etree.SubElement(root, "div")
container.set("class", "message_embed")
img_link = extracted_data.get('image')
if img_link:
parsed_img_link = urllib.parse.urlparse(img_link)
# Append domain where relative img_link url is given
if not parsed_img_link.netloc:
parsed_url = urllib.parse.urlparse(link)
domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
img_link = urllib.parse.urljoin(domain, img_link)
img = markdown.util.etree.SubElement(container, "a")
img.set("style", "background-image: url(" + img_link + ")")
img.set("href", link)
img.set("target", "_blank")
img.set("class", "message_embed_image")
data_container = markdown.util.etree.SubElement(container, "div")
data_container.set("class", "data-container")
title = extracted_data.get('title')
if title:
title_elm = markdown.util.etree.SubElement(data_container, "div")
title_elm.set("class", "message_embed_title")
a = markdown.util.etree.SubElement(title_elm, "a")
a.set("href", link)
a.set("target", "_blank")
a.set("title", title)
a.text = title
description = extracted_data.get('description')
if description:
description_elm = markdown.util.etree.SubElement(data_container, "div")
description_elm.set("class", "message_embed_description")
description_elm.text = description
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
if settings.TEST_SUITE:
from . import testing_mocks
res = testing_mocks.twitter(tweet_id)
else:
creds = {
'consumer_key': settings.TWITTER_CONSUMER_KEY,
'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
}
if not all(creds.values()):
return None
# We lazily import twitter here because its import process is
# surprisingly slow, and doing so has a significant impact on
# the startup performance of `manage.py` commands.
import twitter
try:
api = twitter.Api(tweet_mode='extended', **creds)
# Sometimes Twitter hangs on responses. Timing out here
# will cause the Tweet to go through as-is with no inline
# preview, rather than having the message be rejected
# entirely. This timeout needs to be less than our overall
# formatting timeout.
tweet = timeout(3, api.GetStatus, tweet_id)
res = tweet.AsDict()
except AttributeError:
bugdown_logger.error('Unable to load twitter api, you may have the wrong '
'library installed, see https://github.com/zulip/zulip/issues/86')
return None
except TimeoutExpired:
# We'd like to try again later and not cache the bad result,
# so we need to re-raise the exception (just as though
# we were being rate-limited)
raise
except twitter.TwitterError as e:
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
# Code 34 means that the message doesn't exist; return
# None so that we will cache the error
return None
elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
t[0]['code'] == 130):
# Code 88 means that we were rate-limited and 130
# means Twitter is having capacity issues; either way
# just raise the error so we don't cache None and will
# try again later.
raise
else:
# It's not clear what to do in cases of other errors,
# but for now it seems reasonable to log at error
# level (so that we get notified), but then cache the
# failure to proceed with our usual work
bugdown_logger.error(traceback.format_exc())
return None
return res
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')
def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
in_head = False
# HTML will auto-close meta tags; when we start the next tag, add
# a closing tag for the previous one if it has not been closed yet.
last_closed = True
head = []
# TODO: What if response content is huge? Should we get headers first?
try:
content = requests.get(url, timeout=1).text
except Exception:
return None
# Extract the head and meta tags
# All meta tags are self-closing, have no children, or are closed
# automatically.
for part in content.split('<'):
if not in_head and HEAD_START_RE.match(part):
# Started the head node; output it to have a document root
in_head = True
head.append('<head>')
elif in_head and HEAD_END_RE.match(part):
# Found the end of the head; close any remaining tag, then stop
# processing
in_head = False
if not last_closed:
last_closed = True
head.append('</meta>')
head.append('</head>')
break
elif in_head and META_START_RE.match(part):
# Found a meta node; copy it
if not last_closed:
head.append('</meta>')
last_closed = True
head.append('<')
head.append(part)
if '/>' not in part:
last_closed = False
elif in_head and META_END_RE.match(part):
# End of a meta node; just copy it to close the tag
head.append('<')
head.append(part)
last_closed = True
try:
doc = etree.fromstring(''.join(head))
except etree.ParseError:
return None
og_image = doc.find('meta[@property="og:image"]')
og_title = doc.find('meta[@property="og:title"]')
og_desc = doc.find('meta[@property="og:description"]')
title = None
desc = None
if og_image is not None:
image = og_image.get('content')
else:
return None
if og_title is not None:
title = og_title.get('content')
if og_desc is not None:
desc = og_desc.get('content')
return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url: str) -> Optional[str]:
parsed_url = urllib.parse.urlparse(url)
if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):
return None
to_match = parsed_url.path
# In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
# we need to look at the fragment instead
if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
to_match = parsed_url.fragment
tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match)
if not tweet_id_match:
return None
return tweet_id_match.group("tweetid")
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
for img in found_imgs:
url = img.get("src")
if not url.startswith("http://"):
# Don't rewrite images on our own site (e.g. emoji).
continue
img.set("src", get_camo_url(url))
class BacktickPattern(markdown.inlinepatterns.Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern: str) -> None:
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\'), markdown.util.ETX)
self.tag = 'code'
def handleMatch(self, m: Match[str]) -> Union[str, Element]:
if m.group(4):
el = markdown.util.etree.Element(self.tag)
# Modified to not strip whitespace
el.text = markdown.util.AtomicString(m.group(4))
return el
else:
return m.group(2).replace('\\\\', self.ESCAPED_BSLASH)
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_IMAGE_HEIGHT = 400
TWITTER_MAX_TO_PREVIEW = 3
INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5
def __init__(self, md: markdown.Markdown, bugdown: 'Bugdown') -> None:
# Passing in bugdown for access to config to check if realm is zulip.com
self.bugdown = bugdown
markdown.treeprocessors.Treeprocessor.__init__(self, md)
def get_actual_image_url(self, url: str) -> str:
# Add specific per-site cases to convert image-preview urls to image urls.
# See https://github.com/zulip/zulip/issues/4658 for more information
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
# https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
# https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
split_path = parsed_url.path.split('/')
if len(split_path) > 3 and split_path[3] == "blob":
return urllib.parse.urljoin('https://raw.githubusercontent.com',
'/'.join(split_path[0:3] + split_path[4:]))
return url
def image_preview_enabled(self) -> bool:
return image_preview_enabled_for_realm(
self.markdown.zulip_message,
self.markdown.zulip_realm,
)
def is_image(self, url: str) -> bool:
if not self.image_preview_enabled():
return False
parsed_url = urllib.parse.urlparse(url)
# List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]:
if parsed_url.path.lower().endswith(ext):
return True
return False
def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
# TODO: The returned Dict could possibly be a TypedDict in future.
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
# Only allow preview Dropbox shared links
if not (parsed_url.path.startswith('/s/') or
parsed_url.path.startswith('/sh/') or
is_album):
return None
# Try to retrieve open graph protocol info for a preview
# This might be redundant right now for shared links for images.
# However, we might want to make use of title and description
# in the future. If the actual image is too big, we might also
# want to use the open graph image.
image_info = fetch_open_graph_image(url)
is_image = is_album or self.is_image(url)
# If it is from an album or not an actual image file,
# just use open graph image.
if is_album or not is_image:
# Failed to follow link to find an image preview so
# use placeholder image and guess filename
if image_info is None:
return None
image_info["is_image"] = is_image
return image_info
# Otherwise, try to retrieve the actual image.
# This is because the open graph image from Dropbox may have padding,
# and gifs do not work.
# TODO: What if image is huge? Should we get headers first?
if image_info is None:
image_info = dict()
image_info['is_image'] = True
parsed_url_list = list(parsed_url)
parsed_url_list[4] = "dl=1" # Replaces query
image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
return image_info
return None
def youtube_id(self, url: str) -> Optional[str]:
if not self.image_preview_enabled():
return None
# Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
# If it matches, match.group(2) is the video id.
youtube_re = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)' + \
r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))' + \
r'?([0-9A-Za-z_-]+)(?(1).+)?$'
match = re.match(youtube_re, url)
if match is None:
return None
return match.group(2)
def youtube_image(self, url: str) -> Optional[str]:
yt_id = self.youtube_id(url)
if yt_id is not None:
return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,)
return None
def vimeo_id(self, url: str) -> Optional[str]:
if not self.image_preview_enabled():
return None
#(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
# If it matches, match.group('id') is the video id.
vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \
r'(?:channels\/(?:\w+\/)?|groups\/' + \
r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
match = re.match(vimeo_re, url)
if match is None:
return None
return match.group(5)
def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return "Vimeo - {}".format(title)
return None
def twitter_text(self, text: str,
urls: List[Dict[str, str]],
user_mentions: List[Dict[str, Any]],
media: List[Dict[str, Any]]) -> Element:
"""
Use data from the twitter API to turn links, mentions and media into A
tags. Also convert unicode emojis to images.
This works by using the urls, user_mentions and media data from
the twitter API and searching for unicode emojis in the text using
`unicode_emoji_regex`.
The first step is finding the locations of the URLs, mentions, media and
emoji in the text. For each match we build a dictionary with type, the start
location, end location, the URL to link to, and the text (codepoint and title
in case of emojis) to be used in the link (image in case of emojis).
Next we sort the matches by start location, and for each we add the
text from the end of the last link to the start of the current link to
the output. The text needs to be added to the text attribute of the
first node (the P tag) or to the tail of the last link created.
Finally we add any remaining text to the last node.
"""
to_process = [] # type: List[Dict[str, Any]]
# Build dicts for URLs
for url_data in urls:
short_url = url_data["url"]
full_url = url_data["expanded_url"]
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'url',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': full_url,
})
# Build dicts for mentions
for user_mention in user_mentions:
screen_name = user_mention['screen_name']
mention_string = '@' + screen_name
for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
to_process.append({
'type': 'mention',
'start': match.start(),
'end': match.end(),
'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
'text': mention_string,
})
# Build dicts for media
for media_item in media:
short_url = media_item['url']
expanded_url = media_item['expanded_url']
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'media',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': expanded_url,
})
# Build dicts for emojis
for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
to_process.append({
'type': 'emoji',
'start': match.start(),
'end': match.end(),
'codepoint': codepoint,
'title': display_string,
})
to_process.sort(key=lambda x: x['start'])
p = current_node = markdown.util.etree.Element('p')
def set_text(text: str) -> None:
"""
Helper to set the text or the tail of the current_node
"""
if current_node == p:
current_node.text = text
else:
current_node.tail = text
db_data = self.markdown.zulip_db_data
current_index = 0
for item in to_process:
# The text we want to link starts in already-linked text; skip it
if item['start'] < current_index:
continue
# Add text from the end of last link to the start of the current
# link
set_text(text[current_index:item['start']])
current_index = item['end']
if item['type'] != 'emoji':
current_node = elem = url_to_a(db_data, item['url'], item['text'])
else:
current_node = elem = make_emoji(item['codepoint'], item['title'])
p.append(elem)
# Add any unused text
set_text(text[current_index:])
return p
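# A hedged usage sketch (editorial addition, hypothetical data): building a
# paragraph from one shortened URL, with no mentions and no media.
#
#     p = self.twitter_text(
#         'look: https://t.co/abc',
#         urls=[{'url': 'https://t.co/abc', 'expanded_url': 'https://zulip.com/'}],
#         user_mentions=[], media=[])
#     # p is a <p> Element whose child <a> displays the expanded URL
#     # 'https://zulip.com/'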
def twitter_link(self, url: str) -> Optional[Element]:
tweet_id = get_tweet_id(url)
if tweet_id is None:
return None
try:
res = fetch_tweet_data(tweet_id)
if res is None:
return None
user = res['user'] # type: Dict[str, Any]
tweet = markdown.util.etree.Element("div")
tweet.set("class", "twitter-tweet")
img_a = markdown.util.etree.SubElement(tweet, 'a')
img_a.set("href", url)
img_a.set("target", "_blank")
profile_img = markdown.util.etree.SubElement(img_a, 'img')
profile_img.set('class', 'twitter-avatar')
# For some reason, for some tweets (e.g. tweet 285072525413724161),
# python-twitter does not give us a
# profile_image_url_https, but instead puts that URL in
# profile_image_url. So use _https if available, but fall
# back gracefully.
image_url = user.get('profile_image_url_https', user['profile_image_url'])
profile_img.set('src', image_url)
text = html.unescape(res['full_text'])
urls = res.get('urls', [])
user_mentions = res.get('user_mentions', [])
media = res.get('media', []) # type: List[Dict[str, Any]]
p = self.twitter_text(text, urls, user_mentions, media)
tweet.append(p)
span = markdown.util.etree.SubElement(tweet, 'span')
span.text = "- %s (@%s)" % (user['name'], user['screen_name'])
# Add image previews
for media_item in media:
# Only photos have a preview image
if media_item['type'] != 'photo':
continue
# Pick the tallest image size that is under
# TWITTER_MAX_IMAGE_HEIGHT pixels, or the smallest size if none qualifies
size_name_tuples = list(media_item['sizes'].items())
size_name_tuples.sort(reverse=True,
key=lambda x: x[1]['h'])
for size_name, size in size_name_tuples:
if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
break
media_url = '%s:%s' % (media_item['media_url_https'], size_name)
img_div = markdown.util.etree.SubElement(tweet, 'div')
img_div.set('class', 'twitter-image')
img_a = markdown.util.etree.SubElement(img_div, 'a')
img_a.set('href', media_item['url'])
img_a.set('target', '_blank')
img_a.set('title', media_item['url'])
img = markdown.util.etree.SubElement(img_a, 'img')
img.set('src', media_url)
return tweet
except Exception:
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we still want to render the
# rest of the message; we just skip the Twitter preview.
bugdown_logger.warning(traceback.format_exc())
return None
def get_url_data(self, e: Element) -> Optional[Tuple[str, str]]:
if e.tag == "a":
if e.text is not None:
return (e.get("href"), e.text)
return (e.get("href"), e.get("href"))
return None
def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None:
grandparent = found_url.family.grandparent
parent = found_url.family.parent
ahref_element = found_url.family.child
(url, text) = found_url.result
actual_url = self.get_actual_image_url(url)
# url != text usually implies a named link, which we opt not to remove
url_eq_text = (url == text)
if parent.tag == 'li':
add_a(parent, self.get_actual_image_url(url), url, title=text)
if not parent.text and not ahref_element.tail and url_eq_text:
parent.remove(ahref_element)
elif parent.tag == 'p':
parent_index = None
for index, uncle in enumerate(grandparent.getchildren()):
if uncle is parent:
parent_index = index
break
if parent_index is not None:
ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index)
else:
# We couldn't find parent among grandparent's children, so we
# can't insert after it; append to the end of grandparent's
# children as normal
add_a(grandparent, actual_url, url, title=text)
# If link is alone in a paragraph, delete paragraph containing it
if (len(parent.getchildren()) == 1 and
(not parent.text or parent.text == "\n") and
not ahref_element.tail and
url_eq_text):
grandparent.remove(parent)
else:
# If none of the above criteria match, fall back to old behavior
add_a(root, actual_url, url, title=text)
def find_proper_insertion_index(self, grandparent: Element, parent: Element,
parent_index_in_grandparent: int) -> int:
# If there are several inline images from the same paragraph, ensure
# that they stay in the correct (not reversed) order by inserting after
# the last inline image from paragraph 'parent'
uncles = grandparent.getchildren()
parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
insertion_index = parent_index_in_grandparent
while True:
insertion_index += 1
if insertion_index >= len(uncles):
return insertion_index
uncle = uncles[insertion_index]
inline_image_classes = ['message_inline_image', 'message_inline_ref']
if (
uncle.tag != 'div' or
'class' not in uncle.keys() or
uncle.attrib['class'] not in inline_image_classes
):
return insertion_index
uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
if uncle_link not in parent_links:
return insertion_index
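# Illustrative sketch of the scan above (assumed structure): if
# grandparent's children are [parent, <div class="message_inline_image">
# (a preview linked from parent), sibling] and parent_index_in_grandparent
# is 0, the loop skips the existing preview div and returns 2, so new
# previews land after earlier ones and stay in source order.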
def is_absolute_url(self, url: str) -> bool:
return bool(urllib.parse.urlparse(url).netloc)
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_urls = walk_tree_with_family(root, self.get_url_data)
if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
return
rendered_tweet_count = 0
for found_url in found_urls:
(url, text) = found_url.result
if not self.is_absolute_url(url):
if self.is_image(url):
self.handle_image_inlining(root, found_url)
# We don't have a strong use case for doing URL previews for relative links.
continue
dropbox_image = self.dropbox_image(url)
if dropbox_image is not None:
class_attr = "message_inline_ref"
is_image = dropbox_image["is_image"]
if is_image:
class_attr = "message_inline_image"
# Not making use of title and description of images
add_a(root, dropbox_image['image'], url,
title=dropbox_image.get('title', ""),
desc=dropbox_image.get('desc', ""),
class_attr=class_attr,
already_thumbnailed=True)
continue
if self.is_image(url):
self.handle_image_inlining(root, found_url)
continue
if get_tweet_id(url) is not None:
if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
# Only render at most TWITTER_MAX_TO_PREVIEW tweets per message
continue
twitter_data = self.twitter_link(url)
if twitter_data is None:
# This link is not actually a tweet known to Twitter
continue
rendered_tweet_count += 1
div = markdown.util.etree.SubElement(root, "div")
div.set("class", "inline-preview-twitter")
div.insert(0, twitter_data)
continue
youtube = self.youtube_image(url)
if youtube is not None:
yt_id = self.youtube_id(url)
add_a(root, youtube, url, None, None,
"youtube-video message_inline_image",
yt_id, already_thumbnailed=True)
continue
db_data = self.markdown.zulip_db_data
if db_data and db_data['sent_by_bot']:
continue
if not url_embed_preview_enabled_for_realm(self.markdown.zulip_message,
self.markdown.zulip_realm):
continue
try:
extracted_data = link_preview.link_embed_data_from_cache(url)
except NotFoundInCache:
self.markdown.zulip_message.links_for_preview.add(url)
continue
if extracted_data:
vm_id = self.vimeo_id(url)
if vm_id is not None:
vimeo_image = extracted_data.get('image')
vimeo_title = self.vimeo_title(extracted_data)
if vimeo_image is not None:
add_a(root, vimeo_image, url, vimeo_title,
None, "vimeo-video message_inline_image", vm_id,
already_thumbnailed=True)
if vimeo_title is not None:
found_url.family.child.text = vimeo_title
else:
add_embed(root, url, extracted_data)
class Avatar(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
img = markdown.util.etree.Element('img')
email_address = match.group('email')
email = email_address.strip().lower()
profile_id = None
db_data = self.markdown.zulip_db_data
if db_data is not None:
user_dict = db_data['email_info'].get(email)
if user_dict is not None:
profile_id = user_dict['id']
img.set('class', 'message_body_gravatar')
img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
img.set('title', email)
img.set('alt', email)
return img
def possible_avatar_emails(content: str) -> Set[str]:
emails = set()
for regex in [AVATAR_REGEX, GRAVATAR_REGEX]:
matches = re.findall(regex, content)
for email in matches:
if email:
emails.add(email)
return emails
path_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,
"generated", "emoji", "name_to_codepoint.json")
with open(path_to_name_to_codepoint) as name_to_codepoint_file:
name_to_codepoint = ujson.load(name_to_codepoint_file)
path_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,
"generated", "emoji", "codepoint_to_name.json")
with open(path_to_codepoint_to_name) as codepoint_to_name_file:
codepoint_to_name = ujson.load(codepoint_to_name_file)
# All of our emoji (excluding ZWJ sequences) belong to one of these Unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
unicode_emoji_regex = '(?P<syntax>['\
'\U0001F100-\U0001F64F' \
'\U0001F680-\U0001F6FF' \
'\U0001F900-\U0001F9FF' \
'\u2000-\u206F' \
'\u2300-\u27BF' \
'\u2900-\u297F' \
'\u2B00-\u2BFF' \
'\u3000-\u303F' \
'\u3200-\u32FF' \
'])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents \U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
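# For example (illustrative): U+1F600 GRINNING FACE falls in the
# \U0001f600-\U0001f64f block listed above; in the UTF-16 based JS regex
# it is the surrogate pair \ud83d\ude00, matched by \ud83d[\udc00-\ude4f].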
def make_emoji(codepoint: str, display_string: str) -> Element:
# Replace underscores in the emoji's title with spaces
title = display_string[1:-1].replace("_", " ")
span = markdown.util.etree.Element('span')
span.set('class', 'emoji emoji-%s' % (codepoint,))
span.set('title', title)
span.text = display_string
return span
def make_realm_emoji(src: str, display_string: str) -> Element:
elt = markdown.util.etree.Element('img')
elt.set('src', src)
elt.set('class', 'emoji')
elt.set("alt", display_string)
elt.set("title", display_string[1:-1].replace("_", " "))
return elt
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
codepoint = hex(ord(unicode_emoji))[2:]
# Unicode codepoints are a minimum of 4 hex digits long; pad
# with zeros if the length is less than four.
while len(codepoint) < 4:
codepoint = '0' + codepoint
return codepoint
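# For example (illustrative):
#   unicode_emoji_to_codepoint('\u2764')     == '2764'   # already 4 digits
#   unicode_emoji_to_codepoint('\u00a9')     == '00a9'   # zero-padded
#   unicode_emoji_to_codepoint('\U0001f600') == '1f600'  # 5 digits, unpadded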
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
""" Translates emoticons like `:)` into emoji like `:smile:`. """
def handleMatch(self, match: Match[str]) -> Optional[Element]:
db_data = self.markdown.zulip_db_data
if db_data is None or not db_data['translate_emoticons']:
return None
emoticon = match.group('emoticon')
translated = translate_emoticons(emoticon)
name = translated[1:-1]
return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
return make_emoji(codepoint, display_string)
else:
return None
class Emoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group("syntax")
name = orig_syntax[1:-1]
active_realm_emoji = {} # type: Dict[str, Dict[str, str]]
db_data = self.markdown.zulip_db_data
if db_data is not None:
active_realm_emoji = db_data['active_realm_emoji']
if self.markdown.zulip_message and name in active_realm_emoji:
return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
elif name == 'zulip':
return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
elif name in name_to_codepoint:
return make_emoji(name_to_codepoint[name], orig_syntax)
else:
return None
def content_has_emoji_syntax(content: str) -> bool:
return re.search(EMOJI_REGEX, content) is not None
class ModalLink(markdown.inlinepatterns.Pattern):
"""
A pattern that allows including in-app modal links in messages.
"""
def handleMatch(self, match: Match[str]) -> Element:
relative_url = match.group('relative_url')
text = match.group('text')
a_tag = markdown.util.etree.Element("a")
a_tag.set("href", relative_url)
a_tag.set("title", relative_url)
a_tag.text = text
return a_tag
class Tex(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Element:
rendered = render_tex(match.group('body'), is_inline=True)
if rendered is not None:
return etree.fromstring(rendered.encode('utf-8'))
else: # Something went wrong while rendering
span = markdown.util.etree.Element('span')
span.set('class', 'tex-error')
span.text = '$$' + match.group('body') + '$$'
return span
upload_title_re = re.compile("^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$")
def url_filename(url: str) -> str:
"""Extract the filename if a URL is an uploaded file, or return the original URL"""
match = upload_title_re.match(url)
if match:
return match.group('filename')
else:
return url
def fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None:
"""Set certain attributes we want on every link."""
if target_blank:
link.set('target', '_blank')
link.set('title', url_filename(link.get('href')))
def sanitize_url(url: str) -> Optional[str]:
"""
Sanitize a URL against XSS attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urllib.parse.urlparse(url.replace(' ', '%20'))
scheme, netloc, path, params, query, fragment = parts
except ValueError:
# Bad URL - so bad it couldn't be parsed.
return ''
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == '' and netloc == '' and '@' in path:
scheme = 'mailto'
elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':
# Allow domain-relative links
return urllib.parse.urlunparse(('', '', path, params, query, fragment))
elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:
# Allow fragment links
return urllib.parse.urlunparse(('', '', '', '', '', fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url('http://' + url)
locless_schemes = ['mailto', 'news', 'file', 'bitcoin']
if netloc == '' and scheme not in locless_schemes:
# This fails regardless of anything else.
# Return immediately to save additional processing
return None
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we whitelist the scheme.
if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# URL passes all tests; return it as-is.
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
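# A few illustrative inputs and outputs (a sketch, not a test suite):
#   sanitize_url('zulip.com/help')        -> 'http://zulip.com/help'
#   sanitize_url('hello@example.com')     -> 'mailto:hello@example.com'
#   sanitize_url('/user_uploads/x.png')   -> '/user_uploads/x.png'
#   sanitize_url('javascript://alert(1)') -> None (scheme not whitelisted)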
def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:
a = markdown.util.etree.Element('a')
href = sanitize_url(url)
target_blank = True
if href is None:
# Rejected by sanitize_url; render it as plain text.
return url
if text is None:
text = markdown.util.AtomicString(url)
href = rewrite_local_links_to_relative(db_data, href)
target_blank = not href.startswith("#narrow") and not href.startswith('mailto:')
a.set('href', href)
a.text = text
fixup_link(a, target_blank)
return a
class VerbosePattern(markdown.inlinepatterns.Pattern):
def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:
markdown.inlinepatterns.Pattern.__init__(self, ' ', md)
# HACK: we just had python-markdown compile a dummy (single-space) regex.
# Now replace it with the real regex, compiled with the flags we want.
self.compiled_re = compiled_re
class AutoLink(VerbosePattern):
def handleMatch(self, match: Match[str]) -> ElementStringNone:
url = match.group('url')
db_data = self.markdown.zulip_db_data
return url_to_a(db_data, url)
class UListProcessor(markdown.blockprocessors.UListProcessor):
""" Process unordered list blocks.
Based on markdown.blockprocessors.UListProcessor, but does not accept
'+' or '-' as a bullet character."""
TAG = 'ul'
RE = re.compile('^[ ]{0,3}[*][ ]+(.*)')
def __init__(self, parser: Any) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.markdown.tab_length = 2
super().__init__(parser)
parser.markdown.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process unordered list blocks.
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
"""
def __init__(self, parser: Any) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.markdown.tab_length = 2
super().__init__(parser)
parser.markdown.tab_length = 4
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
""" Allows unordered list blocks that come directly after a
paragraph to be rendered as an unordered list
Detects paragraphs that have a matching list item that comes
directly after a line of text, and inserts a newline between
them to satisfy Markdown"""
LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
HANGING_ULIST_RE = re.compile('^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)
def run(self, lines: List[str]) -> List[str]:
""" Insert a newline between a paragraph and ulist if missing """
inserts = 0
fence = None
copy = lines[:]
for i in range(len(lines) - 1):
# Ignore anything that is inside a fenced code block
m = FENCE_RE.match(lines[i])
if not fence and m:
fence = m.group('fence')
elif fence and m and fence == m.group('fence'):
fence = None
# If we're not in a fenced block and we detect an upcoming list
# hanging off a paragraph, add a newline
if (not fence and lines[i] and
self.LI_RE.match(lines[i+1]) and
not self.LI_RE.match(lines[i])):
copy.insert(i+inserts+1, '')
inserts += 1
return copy
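# Illustrative sketch of the transformation this performs:
#   ['Some text', '* a bullet'] -> ['Some text', '', '* a bullet']
# while a bullet inside a fenced code block is left untouched:
#   ['```', 'text', '* not a bullet', '```'] -> unchanged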
class AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor):
""" Finds a sequence of lines numbered by the same number"""
RE = re.compile(r'^([ ]*)(\d+)\.[ ]+(.*)')
TAB_LENGTH = 2
def run(self, lines: List[str]) -> List[str]:
new_lines = [] # type: List[str]
current_list = [] # type: List[Match[str]]
current_indent = 0
for line in lines:
m = self.RE.match(line)
# Remember if this line is a continuation of an already-started list
is_next_item = (m and current_list
and current_indent == len(m.group(1)) // self.TAB_LENGTH)
if not is_next_item:
# There are no more items in the list we were processing
new_lines.extend(self.renumber(current_list))
current_list = []
if not m:
# Ordinary line
new_lines.append(line)
elif is_next_item:
# Another list item
current_list.append(m)
else:
# First list item
current_list = [m]
current_indent = len(m.group(1)) // self.TAB_LENGTH
new_lines.extend(self.renumber(current_list))
return new_lines
def renumber(self, mlist: List[Match[str]]) -> List[str]:
if not mlist:
return []
start_number = int(mlist[0].group(2))
# Change numbers only if every one is the same
change_numbers = True
for m in mlist:
if int(m.group(2)) != start_number:
change_numbers = False
break
lines = [] # type: List[str]
counter = start_number
for m in mlist:
number = str(counter) if change_numbers else m.group(2)
lines.append('%s%s. %s' % (m.group(1), number, m.group(3)))
counter += 1
return lines
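# Illustrative sketch: a run of items that all carry the same number is
# renumbered sequentially, while mixed numbering is left alone:
#   ['1. foo', '1. bar', '1. baz'] -> ['1. foo', '2. bar', '3. baz']
#   ['1. foo', '3. bar']           -> unchanged (numbers differ)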
# Based on markdown.inlinepatterns.LinkPattern
class LinkPattern(markdown.inlinepatterns.Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m: Match[str]) -> Optional[Element]:
href = m.group(9)
if not href:
return None
if href[0] == "<":
href = href[1:-1]
href = sanitize_url(self.unescape(href.strip()))
if href is None:
return None
db_data = self.markdown.zulip_db_data
href = rewrite_local_links_to_relative(db_data, href)
el = markdown.util.etree.Element('a')
el.text = m.group(2)
el.set('href', href)
fixup_link(el, target_blank=(href[:1] != '#'))
return el
def prepare_realm_pattern(source: str) -> str:
""" Augment a realm filter so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as "name". """
return r"""(?<![^\s'"\(,:<])(?P<name>""" + source + r')(?!\w)'
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
""" Applied a given realm filter to the input """
def __init__(self, source_pattern: str,
format_string: str,
markdown_instance: Optional[markdown.Markdown]=None) -> None:
self.pattern = prepare_realm_pattern(source_pattern)
self.format_string = format_string
markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)
def handleMatch(self, m: Match[str]) -> Union[Element, str]:
db_data = self.markdown.zulip_db_data
return url_to_a(db_data,
self.format_string % m.groupdict(),
m.group("name"))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
match = m.group(2)
db_data = self.markdown.zulip_db_data
if self.markdown.zulip_message and db_data is not None:
if match.startswith("**") and match.endswith("**"):
name = match[2:-2]
else:
return None
wildcard = mention.user_mention_matches_wildcard(name)
id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
if id_syntax_match:
id = id_syntax_match.group("user_id")
user = db_data['mention_data'].get_user_by_id(id)
else:
user = db_data['mention_data'].get_user(name)
if wildcard:
self.markdown.zulip_message.mentions_wildcard = True
user_id = "*"
elif user:
self.markdown.zulip_message.mentions_user_ids.add(user['id'])
name = user['full_name']
user_id = str(user['id'])
else:
# Don't highlight @mentions that don't refer to a valid user
return None
el = markdown.util.etree.Element("span")
el.set('class', 'user-mention')
el.set('data-user-id', user_id)
el.text = "@%s" % (name,)
return el
return None
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
match = m.group(2)
db_data = self.markdown.zulip_db_data
if self.markdown.zulip_message and db_data is not None:
name = extract_user_group(match)
user_group = db_data['mention_data'].get_user_group(name)
if user_group:
self.markdown.zulip_message.mentions_user_group_ids.add(user_group.id)
name = user_group.name
user_group_id = str(user_group.id)
else:
# Don't highlight @-mentions that don't refer to a valid user
# group.
return None
el = markdown.util.etree.Element("span")
el.set('class', 'user-group-mention')
el.set('data-user-group-id', user_group_id)
el.text = "@%s" % (name,)
return el
return None
class StreamPattern(VerbosePattern):
def find_stream_by_name(self, name: Match[str]) -> Optional[Dict[str, Any]]:
db_data = self.markdown.zulip_db_data
if db_data is None:
return None
stream = db_data['stream_names'].get(name)
return stream
def handleMatch(self, m: Match[str]) -> Optional[Element]:
name = m.group('stream_name')
if self.markdown.zulip_message:
stream = self.find_stream_by_name(name)
if stream is None:
return None
el = markdown.util.etree.Element('a')
el.set('class', 'stream')
el.set('data-stream-id', str(stream['id']))
# TODO: We should quite possibly not be specifying the
# href here and instead having the browser auto-add the
# href when it processes a message with one of these, to
# provide more clarity to API clients.
stream_url = encode_stream(stream['id'], name)
el.set('href', '/#narrow/stream/{stream_url}'.format(stream_url=stream_url))
el.text = '#{stream_name}'.format(stream_name=name)
return el
return None
def possible_linked_stream_names(content: str) -> Set[str]:
matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)
return set(matches)
class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
def run(self, lines: Iterable[str]) -> Iterable[str]:
db_data = self.markdown.zulip_db_data
if self.markdown.zulip_message and db_data is not None:
# We check for alert words here, the set of which are
# dependent on which users may see this message.
#
# Our caller passes in the list of possible_words. We
# don't do any special rendering; we just append the alert words
# we find to the set self.markdown.zulip_message.alert_words.
realm_words = db_data['possible_words']
content = '\n'.join(lines).lower()
allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]'])
allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]'])
for word in realm_words:
escaped = re.escape(word.lower())
match_re = re.compile('(?:%s)%s(?:%s)' %
(allowed_before_punctuation,
escaped,
allowed_after_punctuation))
if re.search(match_re, content):
self.markdown.zulip_message.alert_words.add(word)
return lines
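# Illustrative sketch: for the alert word 'lunch', the regex built above
# is roughly (?:\s|^|[open-punct])lunch(?:\s|$|[close-punct]), so it
# fires on 'lunch?' and '(lunch)' but not on 'lunches', where a word
# character follows.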
# This prevents realm_filters from running on the content of a
# Markdown link, breaking up the link. This is a monkey-patch, but it
# might be worth sending a version of this change upstream.
class AtomicLinkPattern(LinkPattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
ret = LinkPattern.handleMatch(self, m)
if ret is None:
return None
if not isinstance(ret, str):
ret.text = markdown.util.AtomicString(ret.text)
return ret
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
DEFAULT_BUGDOWN_KEY = -1
ZEPHYR_MIRROR_BUGDOWN_KEY = -2
class Bugdown(markdown.Extension):
def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
# define default configs
self.config = {
"realm_filters": [kwargs['realm_filters'],
"Realm-specific filters for realm_filters_key %s" % (kwargs['realm'],)],
"realm": [kwargs['realm'], "Realm id"],
"code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
"Disabled for email gateway"]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
del md.preprocessors['reference']
if self.getConfig('code_block_processor_disabled'):
del md.parser.blockprocessors['code']
for k in ('image_link', 'image_reference', 'automail',
'autolink', 'link', 'reference', 'short_reference',
'escape', 'strong_em', 'emphasis', 'emphasis2',
'linebreak', 'strong', 'backtick'):
del md.inlinePatterns[k]
try:
# linebreak2 was removed upstream in version 3.2.1, so
# don't throw an error if it is not there
del md.inlinePatterns['linebreak2']
except Exception:
pass
# Having the extension operations split into a bunch of
# smaller functions both helps with organization and
# simplifies profiling of the markdown engine build time.
self.extend_alert_words(md)
self.extend_text_formatting(md)
self.extend_block_formatting(md)
self.extend_avatars(md)
self.extend_modal_links(md)
self.extend_mentions(md)
self.extend_stream_links(md)
self.extend_emojis(md)
self.extend_misc(md)
def extend_alert_words(self, md: markdown.Markdown) -> None:
md.preprocessors.add("custom_text_notifications", AlertWordsNotificationProcessor(md), "_end")
def extend_text_formatting(self, md: markdown.Markdown) -> None:
# Inline code block without whitespace stripping
md.inlinePatterns.add(
"backtick",
BacktickPattern(r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'),
"_begin")
md.inlinePatterns.add(
'strong_em',
markdown.inlinepatterns.DoubleTagPattern(
r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*', 'strong,em'),
'>backtick')
# Custom bold syntax: **foo** but not __foo__
md.inlinePatterns.add('strong',
markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'),
'>not_strong')
# Custom strikethrough syntax: ~~foo~~
md.inlinePatterns.add('del',
markdown.inlinepatterns.SimpleTagPattern(
r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)', 'del'), '>strong')
# The text inside * must start and end with a non-whitespace character;
# this is needed for things like "const char *x = (char *)y"
md.inlinePatterns.add(
'emphasis',
markdown.inlinepatterns.SimpleTagPattern(r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*', 'em'),
'>strong')
def extend_block_formatting(self, md: markdown.Markdown) -> None:
for k in ('hashheader', 'setextheader', 'olist', 'ulist', 'indent'):
del md.parser.blockprocessors[k]
md.parser.blockprocessors.add('ulist', UListProcessor(md.parser), '>hr')
md.parser.blockprocessors.add('indent', ListIndentProcessor(md.parser), '<ulist')
# Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
md.parser.blockprocessors['quote'].RE = re.compile(
r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
r'[ ]{0,3}>[ ]?(.*)')
def extend_avatars(self, md: markdown.Markdown) -> None:
# Note that !gravatar syntax should be deprecated long term.
md.inlinePatterns.add('avatar', Avatar(AVATAR_REGEX, md), '>backtick')
md.inlinePatterns.add('gravatar', Avatar(GRAVATAR_REGEX, md), '>backtick')
def extend_modal_links(self, md: markdown.Markdown) -> None:
md.inlinePatterns.add(
'modal_link',
ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'),
'>avatar')
def extend_mentions(self, md: markdown.Markdown) -> None:
md.inlinePatterns.add('usermention', UserMentionPattern(mention.find_mentions, md), '>backtick')
md.inlinePatterns.add('usergroupmention',
UserGroupMentionPattern(mention.user_group_mentions, md),
'>backtick')
def extend_stream_links(self, md: markdown.Markdown) -> None:
md.inlinePatterns.add('stream', StreamPattern(verbose_compile(STREAM_LINK_REGEX), md), '>backtick')
def extend_emojis(self, md: markdown.Markdown) -> None:
md.inlinePatterns.add(
'tex',
Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'),
'>backtick')
md.inlinePatterns.add('emoji', Emoji(EMOJI_REGEX, md), '<nl')
md.inlinePatterns.add('translate_emoticons', EmoticonTranslation(emoticon_regex, md), '>emoji')
md.inlinePatterns.add('unicodeemoji', UnicodeEmoji(unicode_emoji_regex), '_end')
def extend_misc(self, md: markdown.Markdown) -> None:
md.inlinePatterns.add('link', AtomicLinkPattern(markdown.inlinepatterns.LINK_RE, md), '>avatar')
for (pattern, format_string, id) in self.getConfig("realm_filters"):
md.inlinePatterns.add('realm_filters/%s' % (pattern,),
RealmFilterPattern(pattern, format_string, md), '>link')
md.inlinePatterns.add('autolink', AutoLink(get_web_link_regex(), md), '>link')
md.preprocessors.add('hanging_ulists',
BugdownUListPreprocessor(md),
"_begin")
md.preprocessors.add('auto_number_olist',
AutoNumberOListPreprocessor(md),
"_begin")
md.treeprocessors.add("inline_interesting_links", InlineInterestingLinkProcessor(md, self), "_end")
if settings.CAMO_URI:
md.treeprocessors.add("rewrite_to_https", InlineHttpsProcessor(md), "_end")
if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
# Disable almost all inline patterns for zephyr mirror
# users' traffic that is mirrored. Note that
# inline_interesting_links is a treeprocessor and thus is
# not removed
for k in list(md.inlinePatterns.keys()):
if k not in ["autolink"]:
del md.inlinePatterns[k]
for k in list(md.treeprocessors.keys()):
if k not in ["inline_interesting_links", "inline", "rewrite_to_https"]:
del md.treeprocessors[k]
for k in list(md.preprocessors.keys()):
if k not in ["custom_text_notifications"]:
del md.preprocessors[k]
for k in list(md.parser.blockprocessors.keys()):
if k not in ["paragraph"]:
del md.parser.blockprocessors[k]
md_engines = {} # type: Dict[Tuple[int, bool], markdown.Markdown]
realm_filter_data = {} # type: Dict[int, List[Tuple[str, str, int]]]
class EscapeHtml(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
del md.preprocessors['html_block']
del md.inlinePatterns['html']
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
del md_engines[md_engine_key]
realm_filters = realm_filter_data[realm_filters_key]
md_engines[md_engine_key] = build_engine(
realm_filters=realm_filters,
realm_filters_key=realm_filters_key,
email_gateway=email_gateway,
)
def build_engine(realm_filters: List[Tuple[str, str, int]],
realm_filters_key: int,
email_gateway: bool) -> markdown.Markdown:
engine = markdown.Markdown(
output_format = 'html',
extensions = [
nl2br.makeExtension(),
tables.makeExtension(),
codehilite.makeExtension(
linenums=False,
guess_lang=False
),
fenced_code.makeExtension(),
EscapeHtml(),
Bugdown(realm_filters=realm_filters,
realm=realm_filters_key,
code_block_processor_disabled=email_gateway)])
return engine
def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
matches = [] # type: List[str]
realm_filters = realm_filters_for_realm(realm_filters_key)
for realm_filter in realm_filters:
pattern = prepare_realm_pattern(realm_filter[0])
for m in re.finditer(pattern, topic_name):
matches += [realm_filter[1] % m.groupdict()]
return matches
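# Illustrative sketch with a hypothetical realm filter
# ('#(?P<id>[0-9]+)', 'https://trac.example.com/ticket/%(id)s'):
#   topic_links(realm_id, 'fix #123') == ['https://trac.example.com/ticket/123']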
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
# If realm_filters_key is None, load all filters
global realm_filter_data
if realm_filters_key is None:
all_filters = all_realm_filters()
all_filters[DEFAULT_BUGDOWN_KEY] = []
for realm_filters_key, filters in all_filters.items():
realm_filter_data[realm_filters_key] = filters
make_md_engine(realm_filters_key, email_gateway)
# Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
else:
realm_filters = realm_filters_for_realm(realm_filters_key)
if realm_filters_key not in realm_filter_data or \
realm_filter_data[realm_filters_key] != realm_filters:
# Realm filters data has changed, update `realm_filter_data` and any
# of the existing markdown engines using this set of realm filters.
realm_filter_data[realm_filters_key] = realm_filters
for email_gateway_flag in [True, False]:
if (realm_filters_key, email_gateway_flag) in md_engines:
# Update only existing engines (if any); don't create a new one.
make_md_engine(realm_filters_key, email_gateway_flag)
if (realm_filters_key, email_gateway) not in md_engines:
# The Markdown engine corresponding to this key doesn't exist, so create one.
make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)
def privacy_clean_markdown(content: str) -> str:
return repr(_privacy_re.sub('x', content))
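# For example (illustrative):
#   privacy_clean_markdown('my token is abc123!') == "'xx xxxxx xx xxxxxx!'"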
def log_bugdown_error(msg: str) -> None:
"""We use this unusual logging approach to log the bugdown error, in
order to prevent AdminNotifyHandler from sending the sanitized
original markdown formatting into another Zulip message, which
could cause an infinite exception loop."""
bugdown_logger.error(msg)
def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
if not emails:
return dict()
q_list = {
Q(email__iexact=email.strip().lower())
for email in emails
}
rows = UserProfile.objects.filter(
realm_id=realm_id
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'email',
)
dct = {
row['email'].strip().lower(): row
for row in rows
}
return dct
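# Illustrative sketch of the query built above: for emails {'a@x.com',
# 'b@x.com'}, functools.reduce(lambda a, b: a | b, q_list) produces
# Q(email__iexact='a@x.com') | Q(email__iexact='b@x.com'), i.e. one
# OR'ed filter rather than one query per address.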
def get_full_name_info(realm_id: int, full_names: Set[str]) -> Dict[str, FullNameInfo]:
if not full_names:
return dict()
# Remove the trailing part of the `user|id` mention syntax.
name_re = r'(?P<full_name>.+)\|\d+$'
for full_name in full_names.copy():
name_syntax_match = re.match(name_re, full_name)
if name_syntax_match:
full_names.remove(full_name)
full_names.add(name_syntax_match.group("full_name"))
q_list = {
Q(full_name__iexact=full_name)
for full_name in full_names
}
rows = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'full_name',
'email',
)
dct = {} # type: Dict[str, FullNameInfo]
for row in rows:
key = row['full_name'].lower()
# Ensure users with duplicate full names get distinct keys in the dict
if key in dct:
key = '{}|{}'.format(key, row['id'])
dct[key] = row
return dct
class MentionData:
def __init__(self, realm_id: int, content: str) -> None:
full_names = possible_mentions(content)
self.full_name_info = get_full_name_info(realm_id, full_names)
self.user_id_info = {
row['id']: row
for row in self.full_name_info.values()
}
self.init_user_group_data(realm_id=realm_id, content=content)
def init_user_group_data(self,
realm_id: int,
content: str) -> None:
user_group_names = possible_user_group_mentions(content)
self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
self.user_group_members = defaultdict(list) # type: Dict[int, List[int]]
group_ids = [group.id for group in self.user_group_name_info.values()]
if not group_ids:
# Early-return to avoid the cost of hitting the ORM,
# which shows up in profiles.
return
membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
for info in membership.values('user_group_id', 'user_profile_id'):
group_id = info['user_group_id']
user_profile_id = info['user_profile_id']
self.user_group_members[group_id].append(user_profile_id)
def get_user(self, name: str) -> Optional[FullNameInfo]:
return self.full_name_info.get(name.lower(), None)
def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
return self.user_id_info.get(int(id), None)
def get_user_ids(self) -> Set[int]:
"""
Returns the user IDs that might have been mentioned by this
content. Note that because this data structure has not parsed
the message and does not know about escaping/code blocks, this
will overestimate the list of user ids.
"""
return set(self.user_id_info.keys())
def get_user_group(self, name: str) -> Optional[UserGroup]:
return self.user_group_name_info.get(name.lower(), None)
def get_group_members(self, user_group_id: int) -> List[int]:
return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
if not user_group_names:
return dict()
rows = UserGroup.objects.filter(realm_id=realm_id,
name__in=user_group_names)
dct = {row.name.lower(): row for row in rows}
return dct
def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
if not stream_names:
return dict()
q_list = {
Q(name=name)
for name in stream_names
}
rows = get_active_streams(
realm=realm,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'name',
)
dct = {
row['name']: row
for row in rows
}
return dct
def do_convert(content: str,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
possible_words: Optional[Set[str]]=None,
sent_by_bot: Optional[bool]=False,
translate_emoticons: Optional[bool]=False,
mention_data: Optional[MentionData]=None,
email_gateway: Optional[bool]=False) -> str:
"""Convert Markdown to HTML, with Zulip-specific settings and hacks."""
# This logic is a bit convoluted, but the overall goal is to support a range of use cases:
# * Nothing is passed in other than content -> just run default options (e.g. for docs)
# * message is passed, but no realm is -> look up realm from message
# * message_realm is passed -> use that realm for bugdown purposes
if message is not None:
if message_realm is None:
message_realm = message.get_realm()
if message_realm is None:
realm_filters_key = DEFAULT_BUGDOWN_KEY
else:
realm_filters_key = message_realm.id
if message is not None and message_realm is not None:
if message_realm.is_zephyr_mirror_realm:
if message.sending_client.name == "zephyr_mirror":
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY
maybe_update_markdown_engines(realm_filters_key, email_gateway)
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
_md_engine = md_engines[md_engine_key]
else:
if DEFAULT_BUGDOWN_KEY not in md_engines:
maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)
_md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
# Reset the parser; otherwise it will get slower over time.
_md_engine.reset()
# Filters such as UserMentionPattern need a message.
_md_engine.zulip_message = message
_md_engine.zulip_realm = message_realm
_md_engine.zulip_db_data = None # for now
# Pre-fetch data from the DB that is used in the bugdown thread
if message is not None:
assert message_realm is not None # ensured above if message is not None
if possible_words is None:
possible_words = set() # Set[str]
# Here we fetch the data structures needed to render
# mentions/avatars/stream mentions from the database, but only
# if there is syntax in the message that might use them, since
# the fetches are somewhat expensive and these types of syntax
# are uncommon enough that it's a useful optimization.
if mention_data is None:
mention_data = MentionData(message_realm.id, content)
emails = possible_avatar_emails(content)
email_info = get_email_info(message_realm.id, emails)
stream_names = possible_linked_stream_names(content)
stream_name_info = get_stream_name_info(message_realm, stream_names)
if content_has_emoji_syntax(content):
active_realm_emoji = message_realm.get_active_emoji()
else:
active_realm_emoji = dict()
_md_engine.zulip_db_data = {
'possible_words': possible_words,
'email_info': email_info,
'mention_data': mention_data,
'active_realm_emoji': active_realm_emoji,
'realm_uri': message_realm.uri,
'sent_by_bot': sent_by_bot,
'stream_names': stream_name_info,
'translate_emoticons': translate_emoticons,
}
try:
# Spend at most 5 seconds rendering; this protects the backend
# from being overloaded by bugs (e.g. markdown logic that is
# extremely inefficient in corner cases) as well as user
# errors (e.g. a realm filter that makes some syntax
# infinite-loop).
rendered_content = timeout(5, _md_engine.convert, content)
# Throw an exception if the content is huge; this protects the
# rest of the codebase from any bugs where we end up rendering
# something huge.
if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
raise BugdownRenderingException('Rendered content exceeds %s characters' %
(MAX_MESSAGE_LENGTH * 10,))
return rendered_content
except Exception:
cleaned = privacy_clean_markdown(content)
# NOTE: Don't change this message without also changing the
# logic in logging_handlers.py or we can create recursive
# exceptions.
exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s'
% (traceback.format_exc(), cleaned))
bugdown_logger.exception(exception_message)
raise BugdownRenderingException()
finally:
# These next three lines are slightly paranoid, since
# we always set these right before actually using the
# engine, but better safe than sorry.
_md_engine.zulip_message = None
_md_engine.zulip_realm = None
_md_engine.zulip_db_data = None
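# Minimal usage sketch (assumes a configured Django environment and
# database; with no message/realm passed, the default engine is used):
#
#   html = do_convert('hello **world**')
#   # html is roughly '<p>hello <strong>world</strong></p>'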
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0
def get_bugdown_time() -> float:
return bugdown_total_time
def get_bugdown_requests() -> int:
return bugdown_total_requests
def bugdown_stats_start() -> None:
global bugdown_time_start
bugdown_time_start = time.time()
def bugdown_stats_finish() -> None:
global bugdown_total_time
global bugdown_total_requests
global bugdown_time_start
bugdown_total_requests += 1
bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content: str,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
possible_words: Optional[Set[str]]=None,
sent_by_bot: Optional[bool]=False,
translate_emoticons: Optional[bool]=False,
mention_data: Optional[MentionData]=None,
email_gateway: Optional[bool]=False) -> str:
bugdown_stats_start()
ret = do_convert(content, message, message_realm,
possible_words, sent_by_bot, translate_emoticons,
mention_data, email_gateway)
bugdown_stats_finish()
return ret
| [
"str",
"Optional[DbData]",
"str",
"Element",
"Callable[[Element], Optional[_T]]",
"Element",
"Callable[[Element], Optional[_T]]",
"Element",
"str",
"str",
"Element",
"str",
"Dict[str, Any]",
"str",
"str",
"str",
"Element",
"str",
"Match[str]",
"markdown.Markdown",
"'Bugdown'",
"str",
"str",
"str",
"str",
"str",
"str",
"Dict[str, Any]",
"str",
"List[Dict[str, str]]",
"List[Dict[str, Any]]",
"List[Dict[str, Any]]",
"str",
"str",
"Element",
"Element",
"ResultWithFamily",
"Element",
"Element",
"int",
"str",
"Element",
"Match[str]",
"str",
"str",
"str",
"str",
"str",
"str",
"Match[str]",
"Match[str]",
"Match[str]",
"str",
"Match[str]",
"Match[str]",
"str",
"markdown.util.etree.Element",
"str",
"Optional[DbData]",
"str",
"Pattern",
"markdown.Markdown",
"Match[str]",
"Any",
"Any",
"List[str]",
"List[str]",
"List[Match[str]]",
"Match[str]",
"str",
"str",
"str",
"Match[str]",
"Match[str]",
"Match[str]",
"Match[str]",
"Match[str]",
"str",
"Iterable[str]",
"Match[str]",
"Any",
"Union[bool, int, List[Any]]",
"markdown.Markdown",
"Dict[str, Any]",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"markdown.Markdown",
"Dict[str, Any]",
"int",
"bool",
"List[Tuple[str, str, int]]",
"int",
"bool",
"int",
"str",
"Optional[int]",
"bool",
"str",
"str",
"int",
"Set[str]",
"int",
"Set[str]",
"int",
"str",
"int",
"str",
"str",
"str",
"str",
"int",
"int",
"Set[str]",
"Realm",
"Set[str]",
"str",
"str"
] | [
2292,
6463,
6487,
8367,
8401,
9299,
9345,
10549,
10571,
10590,
12509,
12524,
12545,
14193,
17161,
19385,
20106,
20646,
20871,
21409,
21437,
21675,
22719,
23147,
25231,
25882,
26092,
26705,
26913,
26945,
27003,
27053,
30457,
31520,
34655,
34914,
34934,
36756,
36773,
36847,
37919,
38013,
41674,
42333,
45443,
45464,
45790,
45811,
46085,
46501,
46948,
47374,
48177,
48415,
48782,
49332,
49576,
49831,
52200,
52223,
52866,
52879,
53221,
53691,
54240,
55071,
56144,
57235,
58045,
58636,
59211,
59248,
59554,
59859,
61296,
62254,
62496,
63416,
63625,
65133,
65604,
65619,
66174,
66205,
67444,
67617,
68919,
69516,
69813,
70039,
70392,
70561,
71036,
73154,
73185,
73330,
73350,
73756,
73820,
73857,
74446,
74463,
74860,
74890,
76815,
76903,
77230,
77243,
77738,
77755,
78838,
78852,
79249,
79292,
80134,
80259,
80747,
80889,
81010,
81033,
81336,
81357,
81798,
87545
] | [
2295,
6479,
6490,
8374,
8434,
9306,
9378,
10556,
10574,
10593,
12516,
12527,
12559,
14196,
17164,
19388,
20113,
20649,
20881,
21426,
21446,
21678,
22722,
23150,
25234,
25885,
26095,
26719,
26916,
26965,
27023,
27073,
30460,
31523,
34662,
34921,
34950,
36763,
36780,
36850,
37922,
38020,
41684,
42336,
45446,
45467,
45793,
45814,
46088,
46511,
46958,
47384,
48180,
48425,
48792,
49335,
49603,
49834,
52216,
52226,
52873,
52896,
53231,
53694,
54243,
55080,
56153,
57251,
58055,
58639,
59214,
59251,
59564,
59869,
61306,
62264,
62506,
63419,
63638,
65143,
65607,
65646,
66191,
66219,
67461,
67634,
68936,
69533,
69830,
70056,
70409,
70578,
71053,
73171,
73199,
73333,
73354,
73782,
73823,
73861,
74449,
74466,
74873,
74894,
76818,
76906,
77233,
77251,
77741,
77763,
78841,
78855,
79252,
79295,
80137,
80262,
80750,
80892,
81013,
81041,
81341,
81365,
81801,
87548
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/api_arguments_table_generator.py | import re
import os
import ujson
from django.utils.html import escape as escape_html
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from zerver.lib.openapi import get_openapi_parameters
from typing import Any, Dict, Optional, List
import markdown
REGEXP = re.compile(r'\{generate_api_arguments_table\|\s*(.+?)\s*\|\s*(.+)\s*\}')
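# Illustrative example of the macro syntax this matches (the file name
# here is hypothetical):
#   '{generate_api_arguments_table|arguments.json|/messages:post}'
#   -> group(1) == 'arguments.json', group(2) == '/messages:post'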
class MarkdownArgumentsTableGenerator(Extension):
def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:
if configs is None:
configs = {}
self.config = {
'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],
}
for key, value in configs.items():
self.setConfig(key, value)
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'
)
class APIArgumentsTablePreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APIArgumentsTablePreprocessor, self).__init__(md)
self.base_path = config['base_path']
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = REGEXP.search(line)
if not match:
continue
filename = match.group(1)
doc_name = match.group(2)
filename = os.path.expanduser(filename)
is_openapi_format = filename.endswith('.yaml')
if not os.path.isabs(filename):
parent_dir = self.base_path
filename = os.path.normpath(os.path.join(parent_dir, filename))
if is_openapi_format:
endpoint, method = doc_name.rsplit(':', 1)
arguments = [] # type: List[Dict[str, Any]]
try:
arguments = get_openapi_parameters(endpoint, method)
except KeyError as e:
# Don't raise an exception if the "parameters"
# field is missing; we assume that's because the
# endpoint doesn't accept any parameters
if e.args != ('parameters',):
raise e
else:
with open(filename, 'r') as fp:
json_obj = ujson.load(fp)
arguments = json_obj[doc_name]
if arguments:
text = self.render_table(arguments)
else:
text = ['This endpoint does not consume any arguments.']
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:
table = []
beginning = """
<table class="table">
<thead>
<tr>
<th>Argument</th>
<th>Example</th>
<th>Required</th>
<th>Description</th>
</tr>
</thead>
<tbody>
"""
tr = """
<tr>
<td><code>{argument}</code></td>
<td><code>{example}</code></td>
<td>{required}</td>
<td>{description}</td>
</tr>
"""
table.append(beginning)
md_engine = markdown.Markdown(extensions=[])
for argument in arguments:
description = argument['description']
oneof = ['`' + item + '`'
for item in argument.get('schema', {}).get('enum', [])]
if oneof:
description += '\nMust be one of: {}.'.format(', '.join(oneof))
default = argument.get('schema', {}).get('default')
if default is not None:
description += '\nDefaults to `{}`.'.format(ujson.dumps(default))
# TODO: Swagger allows indicating where the argument goes
# (path, querystring, form data...). A column in the table should
# be added for this.
table.append(tr.format(
argument=argument.get('argument') or argument.get('name'),
# Show this as JSON to avoid changing the quoting style, which
# may cause problems with JSON encoding.
example=escape_html(ujson.dumps(argument['example'])),
required='Yes' if argument.get('required') else 'No',
description=md_engine.convert(description),
))
table.append("</tbody>")
table.append("</table>")
return table
def makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:
return MarkdownArgumentsTableGenerator(kwargs)
| [
"markdown.Markdown",
"Dict[str, Any]",
"markdown.Markdown",
"Dict[str, Any]",
"List[str]",
"List[Dict[str, Any]]",
"Any",
"str"
] | [
811,
842,
1088,
1115,
1275,
3532,
5265,
5280
] | [
828,
856,
1105,
1129,
1284,
3552,
5268,
5283
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/api_code_examples.py | import re
import os
import sys
import json
import inspect
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List
import markdown
import zerver.lib.api_test_helpers
from zerver.lib.openapi import get_openapi_fixture
MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]:
start = -1
end = -1
for line in source:
match = CODE_EXAMPLE_REGEX.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.extend(source[start + 1: end])
snippet.append(' print(result)')
snippet.append('\n')
source = source[end + 1:]
return extract_python_code_example(source, snippet)
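# Illustrative sketch: given source lines like
#   ['def test(client):',
#    '    # {code_example|start}',
#    '    result = client.get_profile()',
#    '    # {code_example|end}']
# this returns the lines between the markers plus a print call:
#   ['    result = client.get_profile()', '    print(result)', '\n']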
def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]:
method = zerver.lib.api_test_helpers.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippet = extract_python_code_example(function_source_lines, [])
code_example = []
code_example.append('```python')
code_example.extend(config)
for line in snippet:
# Remove one level of indentation and strip newlines
code_example.append(line[4:].rstrip())
code_example.append('```')
return code_example
SUPPORTED_LANGUAGES = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
}
} # type: Dict[str, Any]
class APICodeExamplesGenerator(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APICodeExamplesPreprocessor, self).__init__(md)
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language = match.group(2)
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if key == 'fixture':
if argument:
text = self.render_fixture(function, name=argument)
else:
text = self.render_fixture(function)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function)
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
# We assume that if the function we're rendering starts with a slash
# it's a path in the endpoint and therefore it uses the new OpenAPI
# format.
if function.startswith('/'):
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
else:
fixture_dict = zerver.lib.api_test_helpers.FIXTURES[function]
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('```')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator:
return APICodeExamplesGenerator(kwargs)
| [
"List[str]",
"List[str]",
"str",
"markdown.Markdown",
"Dict[str, Any]",
"markdown.Markdown",
"Dict[str, Any]",
"List[str]",
"str",
"Any",
"str"
] | [
886,
906,
1536,
2507,
2538,
2779,
2806,
2919,
4590,
5365,
5380
] | [
895,
915,
1539,
2524,
2552,
2796,
2820,
2928,
4593,
5368,
5383
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/fenced_code.py | """
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print(html)
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print(markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace'))
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print(markdown.markdown(text, extensions=['fenced_code']))
<pre><code>
~~~~
</code></pre>
Removes trailing whitespace from code blocks that would cause horizontal scrolling:
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block \t\t\t\t\t\t\t
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print(html)
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print(markdown.markdown(text, extensions=['fenced_code']))
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
import subprocess
import markdown
from django.utils.html import escape
from markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension
from zerver.lib.tex import render_tex
from typing import Any, Dict, Iterable, List, MutableSequence, Optional, Tuple, Union
# Global vars
FENCE_RE = re.compile("""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(
\\{?\\.?
(?P<lang>
[a-zA-Z0-9_+-./#]*
) # "py" or "javascript"
\\}?
) # language, like ".py" or "{javascript}"
[ ]* # spaces
$
""", re.VERBOSE)
CODE_WRAP = '<pre><code%s>%s\n</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
# Newer versions of Python-Markdown (starting at 2.3?) have
# a normalize_whitespace preprocessor that needs to go first.
position = ('>normalize_whitespace'
if 'normalize_whitespace' in md.preprocessors
else '_begin')
md.preprocessors.add('fenced_code_block',
FencedBlockPreprocessor(md),
position)
class BaseHandler:
def handle_line(self, line: str) -> None:
raise NotImplementedError()
def done(self) -> None:
raise NotImplementedError()
def generic_handler(processor: Any, output: MutableSequence[str], fence: str, lang: str) -> BaseHandler:
if lang in ('quote', 'quoted'):
return QuoteHandler(processor, output, fence)
elif lang in ('math', 'tex', 'latex'):
return TexHandler(processor, output, fence)
else:
return CodeHandler(processor, output, fence, lang)
def check_for_new_fence(processor: Any, output: MutableSequence[str], line: str) -> None:
m = FENCE_RE.match(line)
if m:
fence = m.group('fence')
lang = m.group('lang')
handler = generic_handler(processor, output, fence, lang)
processor.push(handler)
else:
output.append(line)
class OuterHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str]) -> None:
self.output = output
self.processor = processor
def handle_line(self, line: str) -> None:
check_for_new_fence(self.processor, self.output, line)
def done(self) -> None:
self.processor.pop()
class CodeHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str], fence: str, lang: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lang = lang
self.lines = [] # type: List[str]
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line.rstrip())
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_code(self.lang, text)
text = self.processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class QuoteHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str], fence: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lines = [] # type: List[str]
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line)
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_quote(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class TexHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str], fence: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lines = [] # type: List[str]
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line)
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_tex(text)
text = self.processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
def __init__(self, md: markdown.Markdown) -> None:
markdown.preprocessors.Preprocessor.__init__(self, md)
self.checked_for_codehilite = False
self.codehilite_conf = {} # type: Dict[str, List[Any]]
def push(self, handler: BaseHandler) -> None:
self.handlers.append(handler)
def pop(self) -> None:
self.handlers.pop()
def run(self, lines: Iterable[str]) -> List[str]:
""" Match and store Fenced Code Blocks in the HtmlStash. """
output = [] # type: List[str]
processor = self
self.handlers = [] # type: List[BaseHandler]
handler = OuterHandler(processor, output)
self.push(handler)
for line in lines:
self.handlers[-1].handle_line(line)
while self.handlers:
self.handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Bugdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != '':
output.append('')
return output
def format_code(self, lang: str, text: str) -> str:
if lang:
langclass = LANG_TAG % (lang,)
else:
langclass = ''
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.markdown.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
        # If config is not empty, then the codehilite extension
        # is enabled, so we call it to highlight the code
if self.codehilite_conf:
            highlighter = CodeHilite(text,
                                     linenums=self.codehilite_conf['linenums'][0],
                                     guess_lang=self.codehilite_conf['guess_lang'][0],
                                     css_class=self.codehilite_conf['css_class'][0],
                                     style=self.codehilite_conf['pygments_style'][0],
                                     use_pygments=self.codehilite_conf['use_pygments'][0],
                                     lang=(lang or None),
                                     noclasses=self.codehilite_conf['noclasses'][0])
            code = highlighter.hilite()
else:
code = CODE_WRAP % (langclass, self._escape(text))
return code
def format_quote(self, text: str) -> str:
paragraphs = text.split("\n\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines if line != ''))
return "\n\n".join(quoted_paragraphs)
def format_tex(self, text: str) -> str:
paragraphs = text.split("\n\n")
tex_paragraphs = []
for paragraph in paragraphs:
html = render_tex(paragraph, is_inline=False)
if html is not None:
tex_paragraphs.append(html)
else:
tex_paragraphs.append('<span class="tex-error">' +
escape(paragraph) + '</span>')
return "\n\n".join(tex_paragraphs)
def placeholder(self, code: str) -> str:
return self.markdown.htmlStash.store(code, safe=True)
def _escape(self, txt: str) -> str:
""" basic html escaping """
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
def makeExtension(*args: Any, **kwargs: None) -> FencedCodeExtension:
return FencedCodeExtension(*args, **kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"markdown.Markdown",
"Dict[str, Any]",
"str",
"Any",
"MutableSequence[str]",
"str",
"str",
"Any",
"MutableSequence[str]",
"str",
"Any",
"MutableSequence[str]",
"str",
"Any",
"MutableSequence[str]",
"str",
"str",
"str",
"Any",
"MutableSequence[str]",
"str",
"str",
"Any",
"MutableSequence[str]",
"str",
"str",
"markdown.Markdown",
"BaseHandler",
"Iterable[str]",
"str",
"str",
"str",
"str",
"str",
"str",
"Any",
"None"
] | [
2853,
2884,
3499,
3646,
3659,
3688,
3699,
4010,
4023,
4051,
4372,
4385,
4513,
4715,
4728,
4757,
4768,
4974,
5530,
5543,
5572,
5753,
6270,
6283,
6312,
6493,
7057,
7286,
7428,
8228,
8239,
9614,
9948,
10431,
10534,
10788,
10803
] | [
2870,
2898,
3502,
3649,
3679,
3691,
3702,
4013,
4043,
4054,
4375,
4405,
4516,
4718,
4748,
4760,
4771,
4977,
5533,
5563,
5575,
5756,
6273,
6303,
6315,
6496,
7074,
7297,
7441,
8231,
8242,
9617,
9951,
10434,
10537,
10791,
10807
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/help_emoticon_translations_table.py | import re
import markdown
from typing import Any, Dict, List, Optional, Union
from typing.re import Match
from markdown.preprocessors import Preprocessor
from zerver.lib.emoji import EMOTICON_CONVERSIONS, name_to_codepoint
REGEXP = re.compile(r'\{emoticon_translations\}')
TABLE_HTML = """
<table>
<thead>
<tr>
<th align="center">Emoticon</th>
<th align="center">Emoji</th>
</tr>
</thead>
<tbody>
{body}
</tbody>
</table>
"""
ROW_HTML = """
<tr>
<td align="center"><code>{emoticon}</code></td>
<td align="center">
<img
src="/static/generated/emoji/images-google-64/{codepoint}.png"
alt="{name}"
class="emoji-big">
</td>
</tr>
"""
class EmoticonTranslationsHelpExtension(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
""" Add SettingHelpExtension to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('emoticon_translations', EmoticonTranslation(), '_end')
class EmoticonTranslation(Preprocessor):
def run(self, lines: List[str]) -> List[str]:
for loc, line in enumerate(lines):
match = REGEXP.search(line)
if match:
text = self.handleMatch(match)
lines = lines[:loc] + text + lines[loc+1:]
break
return lines
def handleMatch(self, match: Match[str]) -> List[str]:
rows = [
ROW_HTML.format(emoticon=emoticon,
name=name.strip(':'),
codepoint=name_to_codepoint[name.strip(':')])
for emoticon, name in EMOTICON_CONVERSIONS.items()
]
body = '\n'.join(rows).strip()
return TABLE_HTML.format(body=body).strip().splitlines()
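# A sketch of the expansion: a line matching {emoticon_translations} is
# replaced by TABLE_HTML containing one ROW_HTML entry per item in
# EMOTICON_CONVERSIONS. Assuming a hypothetical entry mapping ':)' to
# ':smile:', the generated row would look like:
#
#     <tr>
#         <td align="center"><code>:)</code></td>
#         <td align="center">
#             <img src="/static/generated/emoji/images-google-64/<codepoint>.png"
#                  alt="smile" class="emoji-big">
#         </td>
#     </tr>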
def makeExtension(*args: Any, **kwargs: Any) -> EmoticonTranslationsHelpExtension:
return EmoticonTranslationsHelpExtension(*args, **kwargs)
| [
"markdown.Markdown",
"Dict[str, Any]",
"List[str]",
"Match[str]",
"Any",
"Any"
] | [
849,
880,
1160,
1473,
1890,
1905
] | [
866,
894,
1169,
1483,
1893,
1908
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/help_relative_links.py | import re
import markdown
from typing import Any, Dict, List, Optional, Union
from typing.re import Match
from markdown.preprocessors import Preprocessor
# There is a lot of duplicated code between this file and
# help_settings_links.py. So if you're making a change here, consider
# making it there as well.
REGEXP = re.compile(r'\{relative\|(?P<link_type>.*?)\|(?P<key>.*?)\}')
gear_info = {
# The pattern is key: [name, link]
# key is from REGEXP: `{relative|gear|key}`
# name is what the item is called in the gear menu: `Select **name**.`
# link is used for relative links: `Select [name](link).`
'manage-streams': ['Manage streams', '/#streams/subscribed'],
'settings': ['Settings', '/#settings/your-account'],
'manage-organization': ['Manage organization', '/#organization/organization-profile'],
'integrations': ['Integrations', '/integrations'],
'stats': ['Statistics', '/stats'],
'plans': ['Plans and pricing', '/plans'],
'billing': ['Billing', '/billing'],
'invite': ['Invite users', '/#invite'],
}
gear_instructions = """
1. From your desktop, click on the **gear**
(<i class="fa fa-cog"></i>) in the upper right corner.
1. Select %(item)s.
"""
def gear_handle_match(key: str) -> str:
if relative_help_links:
item = '[%s](%s)' % (gear_info[key][0], gear_info[key][1])
else:
item = '**%s**' % (gear_info[key][0],)
return gear_instructions % {'item': item}
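# A sketch of the two rendering modes for the helper above, using the
# 'settings' entry from gear_info:
#
#     set_relative_help_links(True)
#     gear_handle_match('settings')
#     # -> gear_instructions with item = '[Settings](/#settings/your-account)'
#     set_relative_help_links(False)
#     gear_handle_match('settings')
#     # -> gear_instructions with item = '**Settings**'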
stream_info = {
'all': ['All streams', '/#streams/all'],
'subscribed': ['Your streams', '/#streams/subscribed'],
}
stream_instructions_no_link = """
1. From your desktop, click on the **gear**
(<i class="fa fa-cog"></i>) in the upper right corner.
1. Click **Manage streams**.
"""
def stream_handle_match(key: str) -> str:
if relative_help_links:
return "1. Go to [%s](%s)." % (stream_info[key][0], stream_info[key][1])
if key == 'all':
return stream_instructions_no_link + "\n\n1. Click **All streams** in the upper left."
return stream_instructions_no_link
LINK_TYPE_HANDLERS = {
'gear': gear_handle_match,
'stream': stream_handle_match,
}
class RelativeLinksHelpExtension(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
""" Add RelativeLinksHelpExtension to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('help_relative_links', RelativeLinks(), '_begin')
relative_help_links = None # type: Optional[bool]
def set_relative_help_links(value: bool) -> None:
global relative_help_links
relative_help_links = value
class RelativeLinks(Preprocessor):
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = REGEXP.search(line)
if match:
text = [self.handleMatch(match)]
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def handleMatch(self, match: Match[str]) -> str:
return LINK_TYPE_HANDLERS[match.group('link_type')](match.group('key'))
def makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:
return RelativeLinksHelpExtension(*args, **kwargs)
| [
"str",
"str",
"markdown.Markdown",
"Dict[str, Any]",
"bool",
"List[str]",
"Match[str]",
"Any",
"Any"
] | [
1240,
1777,
2235,
2266,
2566,
2705,
3655,
3781,
3796
] | [
1243,
1780,
2252,
2280,
2570,
2714,
3665,
3784,
3799
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/help_settings_links.py | import re
import markdown
from typing import Any, Dict, List, Optional, Union
from typing.re import Match
from markdown.preprocessors import Preprocessor
# There is a lot of duplicated code between this file and
# help_relative_links.py. So if you're making a change here, consider
# making it there as well.
REGEXP = re.compile(r'\{settings_tab\|(?P<setting_identifier>.*?)\}')
link_mapping = {
# a mapping from the setting identifier that is the same as the final URL
# breadcrumb to that setting to the name of its setting type, the setting
# name as it appears in the user interface, and a relative link that can
# be used to get to that setting
'your-account': ['Settings', 'Your account', '/#settings/your-account'],
'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],
'notifications': ['Settings', 'Notifications', '/#settings/notifications'],
'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],
'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],
'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],
'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],
'organization-profile': ['Manage organization', 'Organization profile',
'/#organization/organization-profile'],
'organization-settings': ['Manage organization', 'Organization settings',
'/#organization/organization-settings'],
'organization-permissions': ['Manage organization', 'Organization permissions',
'/#organization/organization-permissions'],
'emoji-settings': ['Manage organization', 'Custom emoji',
'/#organization/emoji-settings'],
'auth-methods': ['Manage organization', 'Authentication methods',
'/#organization/auth-methods'],
'user-groups-admin': ['Manage organization', 'User groups',
'/#organization/user-groups-admin'],
'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],
'deactivated-users-admin': ['Manage organization', 'Deactivated users',
'/#organization/deactivated-users-admin'],
'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],
'default-streams-list': ['Manage organization', 'Default streams',
'/#organization/default-streams-list'],
'filter-settings': ['Manage organization', 'Linkifiers',
'/#organization/filter-settings'],
'profile-field-settings': ['Manage organization', 'Custom profile fields',
'/#organization/profile-field-settings'],
'invites-list-admin': ['Manage organization', 'Invitations',
'/#organization/invites-list-admin'],
}
settings_markdown = """
1. From your desktop, click on the **gear**
(<i class="fa fa-cog"></i>) in the upper right corner.
1. Select **%(setting_type_name)s**.
1. On the left, click %(setting_reference)s.
"""
class SettingHelpExtension(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
""" Add SettingHelpExtension to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('setting', Setting(), '_begin')
relative_settings_links = None # type: Optional[bool]
def set_relative_settings_links(value: bool) -> None:
global relative_settings_links
relative_settings_links = value
class Setting(Preprocessor):
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = REGEXP.search(line)
if match:
text = [self.handleMatch(match)]
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def handleMatch(self, match: Match[str]) -> str:
setting_identifier = match.group('setting_identifier')
setting_type_name = link_mapping[setting_identifier][0]
setting_name = link_mapping[setting_identifier][1]
setting_link = link_mapping[setting_identifier][2]
if relative_settings_links:
return "1. Go to [%s](%s)." % (setting_name, setting_link)
return settings_markdown % {'setting_type_name': setting_type_name,
'setting_reference': "**%s**" % (setting_name,)}
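# A sketch of the expansion: '{settings_tab|your-account}' renders either as
#
#     1. Go to [Your account](/#settings/your-account).
#
# when relative_settings_links is enabled, or as settings_markdown with
# setting_type_name='Settings' and setting_reference='**Your account**'
# otherwise.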
def makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:
return SettingHelpExtension(*args, **kwargs)
| [
"markdown.Markdown",
"Dict[str, Any]",
"bool",
"List[str]",
"Match[str]",
"Any",
"Any"
] | [
3213,
3244,
3528,
3669,
4619,
5178,
5193
] | [
3230,
3258,
3532,
3678,
4629,
5181,
5196
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/nested_code_blocks.py | from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
from typing import Any, Dict, Optional, List, Tuple
import markdown
from xml.etree.cElementTree import Element
from zerver.lib.bugdown import walk_tree_with_family, ResultWithFamily
class NestedCodeBlocksRenderer(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.treeprocessors.add(
'nested_code_blocks',
NestedCodeBlocksRendererTreeProcessor(md, self.getConfigs()),
'_end'
)
class NestedCodeBlocksRendererTreeProcessor(markdown.treeprocessors.Treeprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(NestedCodeBlocksRendererTreeProcessor, self).__init__(md)
def run(self, root: Element) -> None:
code_tags = walk_tree_with_family(root, self.get_code_tags)
nested_code_blocks = self.get_nested_code_blocks(code_tags)
for block in nested_code_blocks:
tag, text = block.result
codehilite_block = self.get_codehilite_block(text)
self.replace_element(block.family.grandparent,
codehilite_block,
block.family.parent)
def get_code_tags(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
if e.tag == "code":
return (e.tag, e.text)
return None
def get_nested_code_blocks(
self, code_tags: List[ResultWithFamily]
) -> List[ResultWithFamily]:
nested_code_blocks = []
for code_tag in code_tags:
parent = code_tag.family.parent # type: Any
grandparent = code_tag.family.grandparent # type: Any
if parent.tag == "p" and grandparent.tag == "li":
                # If the parent (<p>) has no text and the <code> element
                # is its only child, then the code span is the only
                # content of the bullet, so we can confidently say this
                # is a nested code block.
if parent.text is None and len(list(parent)) == 1 and len(list(parent.itertext())) == 1:
nested_code_blocks.append(code_tag)
return nested_code_blocks
def get_codehilite_block(self, code_block_text: str) -> Element:
div = markdown.util.etree.Element("div")
div.set("class", "codehilite")
pre = markdown.util.etree.SubElement(div, "pre")
pre.text = code_block_text
return div
def replace_element(
self, parent: Optional[Element],
replacement: markdown.util.etree.Element,
element_to_replace: Element
) -> None:
if parent is None:
return
children = parent.getchildren()
for index, child in enumerate(children):
if child is element_to_replace:
parent.insert(index, replacement)
parent.remove(element_to_replace)
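# A sketch of the transformation this processor performs: for markdown like
#
#     * `code`
#
# the rendered tree <li><p><code>code</code></p></li> has its <p> swapped
# for <div class="codehilite"><pre>code</pre></div>, so the nested code
# block renders like a fenced block instead of an inline code span.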
def makeExtension(*args: Any, **kwargs: str) -> NestedCodeBlocksRenderer:
return NestedCodeBlocksRenderer(kwargs)
| [
"markdown.Markdown",
"Dict[str, Any]",
"markdown.Markdown",
"Dict[str, Any]",
"Element",
"Element",
"List[ResultWithFamily]",
"str",
"Optional[Element]",
"markdown.util.etree.Element",
"Element",
"Any",
"str"
] | [
352,
383,
688,
715,
837,
1328,
1522,
2337,
2605,
2649,
2710,
3039,
3054
] | [
369,
397,
705,
729,
844,
1335,
1544,
2340,
2622,
2676,
2717,
3042,
3057
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/tabbed_sections.py | import re
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List, Tuple
import markdown
START_TABBED_SECTION_REGEX = re.compile(r'^\{start_tabs\}$')
END_TABBED_SECTION_REGEX = re.compile(r'^\{end_tabs\}$')
TAB_CONTENT_REGEX = re.compile(r'^\{tab\|\s*(.+?)\s*\}$')
CODE_SECTION_TEMPLATE = """
<div class="code-section" markdown="1">
{nav_bar}
<div class="blocks">
{blocks}
</div>
</div>
""".strip()
NAV_BAR_TEMPLATE = """
<ul class="nav">
{tabs}
</ul>
""".strip()
NAV_LIST_ITEM_TEMPLATE = """
<li data-language="{data_language}">{name}</li>
""".strip()
DIV_TAB_CONTENT_TEMPLATE = """
<div data-language="{data_language}" markdown="1">
{content}
</div>
""".strip()
# If adding new entries here, also check if you need to update
# tabbed-instructions.js
TAB_DISPLAY_NAMES = {
'desktop-web': 'Desktop/Web',
'ios': 'iOS',
'android': 'Android',
'mac': 'macOS',
'windows': 'Windows',
'linux': 'Linux',
'python': 'Python',
'js': 'JavaScript',
'curl': 'curl',
'zulip-send': 'zulip-send',
'cloud': 'HipChat Cloud',
'server': 'HipChat Server or Data Center',
}
class TabbedSectionsGenerator(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'tabbed_sections', TabbedSectionsPreprocessor(md, self.getConfigs()), '_end')
class TabbedSectionsPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(TabbedSectionsPreprocessor, self).__init__(md)
def run(self, lines: List[str]) -> List[str]:
tab_section = self.parse_tabs(lines)
while tab_section:
nav_bar = self.generate_nav_bar(tab_section)
content_blocks = self.generate_content_blocks(tab_section, lines)
rendered_tabs = CODE_SECTION_TEMPLATE.format(
nav_bar=nav_bar, blocks=content_blocks)
start = tab_section['start_tabs_index']
end = tab_section['end_tabs_index'] + 1
lines = lines[:start] + [rendered_tabs] + lines[end:]
tab_section = self.parse_tabs(lines)
return lines
def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
tab_content_blocks = []
for index, tab in enumerate(tab_section['tabs']):
start_index = tab['start'] + 1
try:
# If there are more tabs, we can use the starting index
# of the next tab as the ending index of the previous one
end_index = tab_section['tabs'][index + 1]['start']
except IndexError:
# Otherwise, just use the end of the entire section
end_index = tab_section['end_tabs_index']
content = '\n'.join(lines[start_index:end_index]).strip()
tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
data_language=tab['tab_name'],
# Wrapping the content in two newlines is necessary here.
# If we don't do this, the inner Markdown does not get
# rendered properly.
content='\n{}\n'.format(content))
tab_content_blocks.append(tab_content_block)
return '\n'.join(tab_content_blocks)
def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
li_elements = []
for tab in tab_section['tabs']:
li = NAV_LIST_ITEM_TEMPLATE.format(
data_language=tab.get('tab_name'),
name=TAB_DISPLAY_NAMES.get(tab.get('tab_name')))
li_elements.append(li)
return NAV_BAR_TEMPLATE.format(tabs='\n'.join(li_elements))
def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
block = {} # type: Dict[str, Any]
for index, line in enumerate(lines):
start_match = START_TABBED_SECTION_REGEX.search(line)
if start_match:
block['start_tabs_index'] = index
tab_content_match = TAB_CONTENT_REGEX.search(line)
if tab_content_match:
block.setdefault('tabs', [])
tab = {'start': index,
'tab_name': tab_content_match.group(1)}
block['tabs'].append(tab)
end_match = END_TABBED_SECTION_REGEX.search(line)
if end_match:
block['end_tabs_index'] = index
break
return block
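# A sketch of parse_tabs on hypothetical input lines:
#
#     lines = ['{start_tabs}',
#              '{tab|python}',
#              'pip install zulip',
#              '{tab|js}',
#              'npm install zulip-js',
#              '{end_tabs}']
#     processor.parse_tabs(lines)
#     # -> {'start_tabs_index': 0,
#     #     'tabs': [{'start': 1, 'tab_name': 'python'},
#     #              {'start': 3, 'tab_name': 'js'}],
#     #     'end_tabs_index': 5}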
def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
return TabbedSectionsGenerator(kwargs)
| [
"markdown.Markdown",
"Dict[str, Any]",
"markdown.Markdown",
"Dict[str, Any]",
"List[str]",
"Dict[str, Any]",
"List[str]",
"Dict[str, Any]",
"List[str]",
"Any",
"str"
] | [
1262,
1293,
1514,
1541,
1653,
2292,
2315,
3417,
3806,
4571,
4586
] | [
1279,
1307,
1531,
1555,
1662,
2306,
2324,
3431,
3815,
4574,
4589
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bugdown/testing_mocks.py | # -*- coding: utf-8 -*-
from typing import Any, Dict, Optional
import ujson
NORMAL_TWEET = """{
"created_at": "Sat Sep 10 22:23:38 +0000 2011",
"favorite_count": 1,
"full_text": "@twitter meets @seepicturely at #tcdisrupt cc.@boscomonkey @episod http://t.co/6J2EgYM",
"hashtags": [
{
"text": "tcdisrupt"
}
],
"id": 112652479837110270,
"id_str": "112652479837110273",
"in_reply_to_screen_name": "Twitter",
"in_reply_to_user_id": 783214,
"lang": "en",
"retweet_count": 4,
"source": "<a href=\\"http://instagram.com\\" rel=\\"nofollow\\">Instagram</a>",
"urls": [
{
"expanded_url": "http://instagr.am/p/MuW67/",
"url": "http://t.co/6J2EgYM"
}
],
"user": {
"created_at": "Mon May 16 20:07:59 +0000 2011",
"description": "Eoin's photography account. See @mceoin for tweets.",
"followers_count": 3,
"id": 299862462,
"lang": "en",
"location": "Twitter",
"name": "Eoin McMillan",
"profile_background_color": "131516",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme14/bg.gif",
"profile_background_tile": true,
"profile_image_url": "http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png",
"profile_link_color": "009999",
"profile_sidebar_fill_color": "EFEFEF",
"profile_text_color": "333333",
"screen_name": "imeoin",
"statuses_count": 278,
"url": "http://t.co/p9hKpiGMyN"
},
"user_mentions": [
{
"id": 783214,
"name": "Twitter",
"screen_name": "Twitter"
},
{
"id": 14792670,
"name": "Bosco So",
"screen_name": "boscomonkey"
},
{
"id": 819797,
"name": "Taylor Singletary",
"screen_name": "episod"
}
]
}"""
MENTION_IN_LINK_TWEET = """{
"created_at": "Sat Sep 10 22:23:38 +0000 2011",
"favorite_count": 1,
"full_text": "http://t.co/@foo",
"hashtags": [
{
"text": "tcdisrupt"
}
],
"id": 112652479837110270,
"id_str": "112652479837110273",
"in_reply_to_screen_name": "Twitter",
"in_reply_to_user_id": 783214,
"lang": "en",
"retweet_count": 4,
"source": "<a href=\\"http://instagram.com\\" rel=\\"nofollow\\">Instagram</a>",
"urls": [
{
"expanded_url": "http://foo.com",
"url": "http://t.co/@foo"
}
],
"user": {
"created_at": "Mon May 16 20:07:59 +0000 2011",
"description": "Eoin's photography account. See @mceoin for tweets.",
"followers_count": 3,
"id": 299862462,
"lang": "en",
"location": "Twitter",
"name": "Eoin McMillan",
"profile_background_color": "131516",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme14/bg.gif",
"profile_background_tile": true,
"profile_image_url": "http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png",
"profile_link_color": "009999",
"profile_sidebar_fill_color": "EFEFEF",
"profile_text_color": "333333",
"screen_name": "imeoin",
"statuses_count": 278,
"url": "http://t.co/p9hKpiGMyN"
},
"user_mentions": [
{
"id": 783214,
"name": "Foo",
"screen_name": "foo"
}
]
}"""
MEDIA_TWEET = """{
"created_at": "Sat Sep 10 22:23:38 +0000 2011",
"favorite_count": 1,
"full_text": "http://t.co/xo7pAhK6n3",
"id": 112652479837110270,
"id_str": "112652479837110273",
"in_reply_to_screen_name": "Twitter",
"in_reply_to_user_id": 783214,
"lang": "en",
"media": [
{
"display_url": "pic.twitter.com/xo7pAhK6n3",
"expanded_url": "http://twitter.com/NEVNBoston/status/421654515616849920/photo/1",
"id": 421654515495211010,
"media_url": "http://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg",
"media_url_https": "https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg",
"sizes": {"large": {"h": 700, "resize": "fit", "w": 1024},
"medium": {"h": 410, "resize": "fit", "w": 599},
"small": {"h": 232, "resize": "fit", "w": 340},
"thumb": {"h": 150, "resize": "crop", "w": 150}},
"type": "photo",
"url": "http://t.co/xo7pAhK6n3"}
],
"retweet_count": 4,
"source": "<a href=\\"http://instagram.com\\" rel=\\"nofollow\\">Instagram</a>",
"user": {
"created_at": "Mon May 16 20:07:59 +0000 2011",
"description": "Eoin's photography account. See @mceoin for tweets.",
"followers_count": 3,
"id": 299862462,
"lang": "en",
"location": "Twitter",
"name": "Eoin McMillan",
"profile_background_color": "131516",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme14/bg.gif",
"profile_background_tile": true,
"profile_image_url": "http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png",
"profile_link_color": "009999",
"profile_sidebar_fill_color": "EFEFEF",
"profile_text_color": "333333",
"screen_name": "imeoin",
"statuses_count": 278,
"url": "http://t.co/p9hKpiGMyN"
},
"user_mentions": [
{
"id": 783214,
"name": "Foo",
"screen_name": "foo"
}
]
}"""
EMOJI_TWEET = """{
"created_at": "Sat Sep 10 22:23:38 +0000 2011",
"favorite_count": 1,
"full_text": "Zulip is 💯% open-source!",
"hashtags": [
{
"text": "tcdisrupt"
}
],
"id": 112652479837110270,
"id_str": "112652479837110273",
"in_reply_to_screen_name": "Twitter",
"in_reply_to_user_id": 783214,
"lang": "en",
"retweet_count": 4,
"source": "<a href=\\"http://instagram.com\\" rel=\\"nofollow\\">Instagram</a>",
"user": {
"created_at": "Mon May 16 20:07:59 +0000 2011",
"description": "Eoin's photography account. See @mceoin for tweets.",
"followers_count": 3,
"id": 299862462,
"lang": "en",
"location": "Twitter",
"name": "Eoin McMillan",
"profile_background_color": "131516",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme14/bg.gif",
"profile_background_tile": true,
"profile_image_url": "http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png",
"profile_link_color": "009999",
"profile_sidebar_fill_color": "EFEFEF",
"profile_text_color": "333333",
"screen_name": "imeoin",
"statuses_count": 278,
"url": "http://t.co/p9hKpiGMyN"
},
"user_mentions": [
{
"id": 783214,
"name": "Twitter",
"screen_name": "Twitter"
},
{
"id": 14792670,
"name": "Bosco So",
"screen_name": "boscomonkey"
},
{
"id": 819797,
"name": "Taylor Singletary",
"screen_name": "episod"
}
]
}"""
def twitter(tweet_id: str) -> Optional[Dict[str, Any]]:
if tweet_id in ["112652479837110273", "287977969287315456", "287977969287315457"]:
return ujson.loads(NORMAL_TWEET)
elif tweet_id == "287977969287315458":
return ujson.loads(MENTION_IN_LINK_TWEET)
elif tweet_id == "287977969287315459":
return ujson.loads(MEDIA_TWEET)
elif tweet_id == "287977969287315460":
return ujson.loads(EMOJI_TWEET)
else:
return None
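# Usage sketch: this function stands in for the real Twitter API lookup in
# tests, so e.g.
#
#     twitter("287977969287315459")   # -> the parsed MEDIA_TWEET dict
#     twitter("0")                    # -> None (unknown tweet id)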
| [
"str"
] | [
7401
] | [
7404
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/bulk_create.py | from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple
from zerver.lib.initial_password import initial_password
from zerver.models import Realm, Stream, UserProfile, Huddle, \
Subscription, Recipient, Client, RealmAuditLog, get_huddle_hash
from zerver.lib.create_user import create_user_profile
def bulk_create_users(realm: Realm,
users_raw: Set[Tuple[str, str, str, bool]],
bot_type: Optional[int]=None,
bot_owner: Optional[UserProfile]=None,
tos_version: Optional[str]=None,
timezone: str="") -> None:
"""
Creates and saves a UserProfile with the given email.
Has some code based off of UserManage.create_user, but doesn't .save()
"""
existing_users = frozenset(UserProfile.objects.filter(
realm=realm).values_list('email', flat=True))
users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])
# Now create user_profiles
profiles_to_create = [] # type: List[UserProfile]
for (email, full_name, short_name, active) in users:
profile = create_user_profile(realm, email,
initial_password(email), active, bot_type,
full_name, short_name, bot_owner, False, tos_version,
timezone, tutorial_status=UserProfile.TUTORIAL_FINISHED,
enter_sends=True)
profiles_to_create.append(profile)
UserProfile.objects.bulk_create(profiles_to_create)
RealmAuditLog.objects.bulk_create(
[RealmAuditLog(realm=realm, modified_user=profile_,
event_type=RealmAuditLog.USER_CREATED, event_time=profile_.date_joined)
for profile_ in profiles_to_create])
profiles_by_email = {} # type: Dict[str, UserProfile]
profiles_by_id = {} # type: Dict[int, UserProfile]
for profile in UserProfile.objects.select_related().filter(realm=realm):
profiles_by_email[profile.email] = profile
profiles_by_id[profile.id] = profile
recipients_to_create = [] # type: List[Recipient]
for (email, full_name, short_name, active) in users:
recipients_to_create.append(Recipient(type_id=profiles_by_email[email].id,
type=Recipient.PERSONAL))
Recipient.objects.bulk_create(recipients_to_create)
recipients_by_email = {} # type: Dict[str, Recipient]
for recipient in recipients_to_create:
recipients_by_email[profiles_by_id[recipient.type_id].email] = recipient
subscriptions_to_create = [] # type: List[Subscription]
for (email, full_name, short_name, active) in users:
subscriptions_to_create.append(
Subscription(user_profile_id=profiles_by_email[email].id,
recipient=recipients_by_email[email]))
Subscription.objects.bulk_create(subscriptions_to_create)
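# A usage sketch with hypothetical emails; each tuple is
# (email, full_name, short_name, active):
#
#     bulk_create_users(realm, {
#         ('alice@example.com', 'Alice', 'alice', True),
#         ('bob@example.com', 'Bob', 'bob', True),
#     })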
# This is only used in populate_db, so it doesn't really need tests
def bulk_create_streams(realm: Realm,
stream_dict: Dict[str, Dict[str, Any]]) -> None: # nocoverage
existing_streams = frozenset([name.lower() for name in
Stream.objects.filter(realm=realm)
.values_list('name', flat=True)])
streams_to_create = [] # type: List[Stream]
for name, options in stream_dict.items():
if 'history_public_to_subscribers' not in options:
options['history_public_to_subscribers'] = (
not options.get("invite_only", False) and not realm.is_zephyr_mirror_realm)
if name.lower() not in existing_streams:
streams_to_create.append(
Stream(
realm=realm,
name=name,
description=options["description"],
invite_only=options.get("invite_only", False),
is_announcement_only=options.get("is_announcement_only", False),
history_public_to_subscribers=options["history_public_to_subscribers"],
is_web_public=options.get("is_web_public", False),
is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
)
)
# Sort streams by name before creating them so that we can have a
# reliable ordering of `stream_id` across different python versions.
    # This is required for test fixtures which contain `stream_id`. Prior
    # to Python 3.3, hashes were not randomized, but a security fix then
    # enabled hash randomization, which makes the iteration order of
    # dictionaries and sets completely unpredictable. Without sorting, the
    # order of elements while iterating `stream_dict` would be random on
    # Python 3.3 and later versions.
streams_to_create.sort(key=lambda x: x.name)
Stream.objects.bulk_create(streams_to_create)
recipients_to_create = [] # type: List[Recipient]
for stream in Stream.objects.filter(realm=realm).values('id', 'name'):
if stream['name'].lower() not in existing_streams:
recipients_to_create.append(Recipient(type_id=stream['id'],
type=Recipient.STREAM))
Recipient.objects.bulk_create(recipients_to_create)
| [
"Realm",
"Set[Tuple[str, str, str, bool]]",
"Realm",
"Dict[str, Dict[str, Any]]"
] | [
351,
391,
3104,
3148
] | [
356,
422,
3109,
3173
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/cache.py | # See https://zulip.readthedocs.io/en/latest/subsystems/caching.html for docs
from functools import wraps
from django.utils.lru_cache import lru_cache
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache
from typing import cast, Any, Callable, Dict, Iterable, List, Optional, Union, Set, TypeVar, Tuple
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import time
import base64
import random
import sys
import os
import hashlib
if False:
from zerver.models import UserProfile, Realm, Message
# These modules have to be imported for type annotations but
# they cannot be imported at runtime due to cyclic dependency.
ReturnT = TypeVar('ReturnT') # Useful for matching return types via Callable[..., ReturnT]
class NotFoundInCache(Exception):
pass
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time() -> float:
return remote_cache_total_time
def get_remote_cache_requests() -> int:
return remote_cache_total_requests
def remote_cache_stats_start() -> None:
global remote_cache_time_start
remote_cache_time_start = time.time()
def remote_cache_stats_finish() -> None:
global remote_cache_total_time
global remote_cache_total_requests
global remote_cache_time_start
remote_cache_total_requests += 1
remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix() -> str:
if settings.CASPER_TESTS:
# This sets the prefix for the benefit of the Casper tests.
#
# Having a fixed key is OK since we don't support running
# multiple copies of the casper tests at the same time anyway.
return 'casper_tests:'
elif settings.TEST_SUITE:
# The Python tests overwrite KEY_PREFIX on each test, but use
# this codepath as well, just to save running the more complex
# code below for reading the normal key prefix.
return 'django_tests_unused:'
# directory `var` should exist in production
os.makedirs(os.path.join(settings.DEPLOY_ROOT, "var"), exist_ok=True)
filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
random_hash = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).digest()
prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
# This does close the underlying file
with os.fdopen(fd, 'w') as f:
f.write(prefix + "\n")
except OSError:
# The file already exists
tries = 1
while tries < 10:
with open(filename, 'r') as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
tries += 1
prefix = ''
time.sleep(0.5)
if not prefix:
print("Could not read remote cache key prefix file")
sys.exit(1)
return prefix
KEY_PREFIX = get_or_create_key_prefix() # type: str
def bounce_key_prefix_for_testing(test_name: str) -> None:
global KEY_PREFIX
KEY_PREFIX = test_name + ':' + str(os.getpid()) + ':'
# We are taking the hash of the KEY_PREFIX to decrease the size of the key.
# Memcached keys should have a length of less than 256.
KEY_PREFIX = hashlib.sha1(KEY_PREFIX.encode('utf-8')).hexdigest()
def get_cache_backend(cache_name: Optional[str]) -> BaseCache:
if cache_name is None:
return djcache
return caches[cache_name]
def get_cache_with_key(
keyfunc: Callable[..., str],
cache_name: Optional[str]=None
) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]:
"""
    The main goal of this function is to get a value from the cache, like
    cache_with_key does. Since a cached value can contain any data, including
    None, we raise an exception (rather than returning a sentinel) when the
    value isn't found in the cache.
"""
def decorator(func: Callable[..., ReturnT]) -> (Callable[..., ReturnT]):
@wraps(func)
def func_with_caching(*args: Any, **kwargs: Any) -> Callable[..., ReturnT]:
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
if val is not None:
return val[0]
raise NotFoundInCache()
return func_with_caching
return decorator
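# A usage sketch (hypothetical key function and fallback): the decorated
# function is a pure cache reader -- it never runs its own body, and it
# raises NotFoundInCache on a miss:
#
#     @get_cache_with_key(lambda user_id: "user_age:%s" % (user_id,))
#     def get_cached_age(user_id: int) -> int: ...
#
#     try:
#         age = get_cached_age(17)
#     except NotFoundInCache:
#         age = compute_and_cache_age(17)  # hypothetical fallback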
def cache_with_key(
keyfunc: Callable[..., str], cache_name: Optional[str]=None,
timeout: Optional[int]=None, with_statsd_key: Optional[str]=None
) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]:
"""Decorator which applies Django caching to a function.
Decorator argument is a function which computes a cache key
from the original function's arguments. You are responsible
for avoiding collisions with other uses of this decorator or
other uses of caching."""
def decorator(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
@wraps(func)
def func_with_caching(*args: Any, **kwargs: Any) -> ReturnT:
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
extra = ""
if cache_name == 'database':
extra = ".dbcache"
if with_statsd_key is not None:
metric_key = with_statsd_key
else:
metric_key = statsd_key(key)
status = "hit" if val is not None else "miss"
statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
# Values are singleton tuples so that we can distinguish
# a result of None from a missing key.
if val is not None:
return val[0]
val = func(*args, **kwargs)
cache_set(key, val, cache_name=cache_name, timeout=timeout)
return val
return func_with_caching
return decorator
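# A usage sketch (hypothetical key function and lookup):
#
#     def user_age_key(user_id: int) -> str:
#         return "user_age:%s" % (user_id,)
#
#     @cache_with_key(user_age_key, timeout=3600)
#     def get_user_age(user_id: int) -> int:
#         ...  # expensive database lookup
#
# The first call computes the value and stores it under user_age_key(...);
# subsequent calls return the cached value until the timeout expires or
# the key is invalidated.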
def cache_set(key: str, val: Any, cache_name: Optional[str]=None, timeout: Optional[int]=None) -> None:
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
remote_cache_stats_finish()
def cache_get(key: str, cache_name: Optional[str]=None) -> Any:
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.get(KEY_PREFIX + key)
remote_cache_stats_finish()
return ret
def cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str, Any]:
keys = [KEY_PREFIX + key for key in keys]
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
def cache_set_many(items: Dict[str, Any], cache_name: Optional[str]=None,
timeout: Optional[int]=None) -> None:
new_items = {}
for key in items:
new_items[KEY_PREFIX + key] = items[key]
items = new_items
remote_cache_stats_start()
get_cache_backend(cache_name).set_many(items, timeout=timeout)
remote_cache_stats_finish()
def cache_delete(key: str, cache_name: Optional[str]=None) -> None:
remote_cache_stats_start()
get_cache_backend(cache_name).delete(KEY_PREFIX + key)
remote_cache_stats_finish()
def cache_delete_many(items: Iterable[str], cache_name: Optional[str]=None) -> None:
remote_cache_stats_start()
get_cache_backend(cache_name).delete_many(
KEY_PREFIX + item for item in items)
remote_cache_stats_finish()
# Generic_bulk_cached fetch and its helpers
ObjKT = TypeVar('ObjKT')
ItemT = TypeVar('ItemT')
CompressedItemT = TypeVar('CompressedItemT')
def default_extractor(obj: CompressedItemT) -> ItemT:
return obj # type: ignore # Need a type assert that ItemT=CompressedItemT
def default_setter(obj: ItemT) -> CompressedItemT:
return obj # type: ignore # Need a type assert that ItemT=CompressedItemT
def default_id_fetcher(obj: ItemT) -> ObjKT:
return obj.id # type: ignore # Need ItemT/CompressedItemT typevars to be a Django protocol
def default_cache_transformer(obj: ItemT) -> ItemT:
return obj
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
def generic_bulk_cached_fetch(
cache_key_function: Callable[[ObjKT], str],
query_function: Callable[[List[ObjKT]], Iterable[Any]],
object_ids: Iterable[ObjKT],
extractor: Callable[[CompressedItemT], ItemT] = default_extractor,
setter: Callable[[ItemT], CompressedItemT] = default_setter,
id_fetcher: Callable[[ItemT], ObjKT] = default_id_fetcher,
cache_transformer: Callable[[ItemT], ItemT] = default_cache_transformer
) -> Dict[ObjKT, ItemT]:
cache_keys = {} # type: Dict[ObjKT, str]
for object_id in object_ids:
cache_keys[object_id] = cache_key_function(object_id)
cached_objects_compressed = cache_get_many([cache_keys[object_id]
for object_id in object_ids]) # type: Dict[str, Tuple[CompressedItemT]]
cached_objects = {} # type: Dict[str, ItemT]
for (key, val) in cached_objects_compressed.items():
cached_objects[key] = extractor(cached_objects_compressed[key][0])
needed_ids = [object_id for object_id in object_ids if
cache_keys[object_id] not in cached_objects]
db_objects = query_function(needed_ids)
items_for_remote_cache = {} # type: Dict[str, Tuple[CompressedItemT]]
for obj in db_objects:
key = cache_keys[id_fetcher(obj)]
item = cache_transformer(obj)
items_for_remote_cache[key] = (setter(item),)
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
cache_set_many(items_for_remote_cache)
return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
if cache_keys[object_id] in cached_objects)
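# A usage sketch with a hypothetical key function and query; only the ids
# missing from the cache hit the database:
#
#     generic_bulk_cached_fetch(
#         cache_key_function=lambda user_id: "user:%s" % (user_id,),
#         query_function=lambda ids: UserProfile.objects.filter(id__in=ids),
#         object_ids=[1, 2, 3],
#     )
#     # -> {1: <UserProfile: 1>, 2: <UserProfile: 2>, 3: <UserProfile: 3>}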
def cache(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
"""Decorator which applies Django caching to a function.
Uses a key based on the function's name, filename, and
the repr() of its arguments."""
func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__)
@wraps(func)
def keyfunc(*args: Any, **kwargs: Any) -> str:
# Django complains about spaces because memcached rejects them
key = func_uniqifier + repr((args, kwargs))
return key.replace('-', '--').replace(' ', '-s')
return cache_with_key(keyfunc)(func)
def preview_url_cache_key(url: str) -> str:
return "preview_url:%s" % (make_safe_digest(url))
def display_recipient_cache_key(recipient_id: int) -> str:
return "display_recipient_dict:%d" % (recipient_id,)
def user_profile_by_email_cache_key(email: str) -> str:
# See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
# are proactively encoding email addresses even though they will
# with high likelihood be ASCII-only for the foreseeable future.
return 'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)
def user_profile_cache_key_id(email: str, realm_id: int) -> str:
return u"user_profile:%s:%s" % (make_safe_digest(email.strip()), realm_id,)
def user_profile_cache_key(email: str, realm: 'Realm') -> str:
return user_profile_cache_key_id(email, realm.id)
def bot_profile_cache_key(email: str) -> str:
return "bot_profile:%s" % (make_safe_digest(email.strip()))
def user_profile_by_id_cache_key(user_profile_id: int) -> str:
return "user_profile_by_id:%s" % (user_profile_id,)
def user_profile_by_api_key_cache_key(api_key: str) -> str:
return "user_profile_by_api_key:%s" % (api_key,)
realm_user_dict_fields = [
'id', 'full_name', 'short_name', 'email',
'avatar_source', 'avatar_version', 'is_active',
'is_realm_admin', 'is_bot', 'realm_id', 'timezone',
'date_joined', 'is_guest'
] # type: List[str]
def realm_user_dicts_cache_key(realm_id: int) -> str:
return "realm_user_dicts:%s" % (realm_id,)
def active_user_ids_cache_key(realm_id: int) -> str:
return "active_user_ids:%s" % (realm_id,)
def active_non_guest_user_ids_cache_key(realm_id: int) -> str:
return "active_non_guest_user_ids:%s" % (realm_id,)
bot_dict_fields = ['id', 'full_name', 'short_name', 'bot_type', 'email',
'is_active', 'default_sending_stream__name',
'realm_id',
'default_events_register_stream__name',
'default_all_public_streams', 'api_key',
'bot_owner__email', 'avatar_source',
'avatar_version'] # type: List[str]
def bot_dicts_in_realm_cache_key(realm: 'Realm') -> str:
return "bot_dicts_in_realm:%s" % (realm.id,)
def get_stream_cache_key(stream_name: str, realm_id: int) -> str:
return "stream_by_realm_and_name:%s:%s" % (
realm_id, make_safe_digest(stream_name.strip().lower()))
def delete_user_profile_caches(user_profiles: Iterable['UserProfile']) -> None:
# Imported here to avoid cyclic dependency.
from zerver.lib.users import get_all_api_keys
keys = []
for user_profile in user_profiles:
keys.append(user_profile_by_email_cache_key(user_profile.email))
keys.append(user_profile_by_id_cache_key(user_profile.id))
for api_key in get_all_api_keys(user_profile):
keys.append(user_profile_by_api_key_cache_key(api_key))
keys.append(user_profile_cache_key(user_profile.email, user_profile.realm))
cache_delete_many(keys)
def delete_display_recipient_cache(user_profile: 'UserProfile') -> None:
from zerver.models import Subscription # We need to import here to avoid cyclic dependency.
recipient_ids = Subscription.objects.filter(user_profile=user_profile)
recipient_ids = recipient_ids.values_list('recipient_id', flat=True)
keys = [display_recipient_cache_key(rid) for rid in recipient_ids]
cache_delete_many(keys)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender: Any, **kwargs: Any) -> None:
user_profile = kwargs['instance']
delete_user_profile_caches([user_profile])
def changed(fields: List[str]) -> bool:
if kwargs.get('update_fields') is None:
# adds/deletes should invalidate the cache
return True
update_fields = set(kwargs['update_fields'])
for f in fields:
if f in update_fields:
return True
return False
# Invalidate our active_users_in_realm info dict if any user has changed
# the fields in the dict or become (in)active
if changed(realm_user_dict_fields):
cache_delete(realm_user_dicts_cache_key(user_profile.realm_id))
if changed(['is_active']):
cache_delete(active_user_ids_cache_key(user_profile.realm_id))
cache_delete(active_non_guest_user_ids_cache_key(user_profile.realm_id))
if changed(['is_guest']):
cache_delete(active_non_guest_user_ids_cache_key(user_profile.realm_id))
if changed(['email', 'full_name', 'short_name', 'id', 'is_mirror_dummy']):
delete_display_recipient_cache(user_profile)
# Invalidate our bots_in_realm info dict if any bot has
# changed the fields in the dict or become (in)active
if user_profile.is_bot and changed(bot_dict_fields):
cache_delete(bot_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate realm-wide alert words cache if any user in the realm has changed
# alert words
if changed(['alert_words']):
cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance']
users = realm.get_active_users()
delete_user_profile_caches(users)
# Deleting realm or updating message_visibility_limit
# attribute should clear the first_visible_message_id cache.
if kwargs.get('update_fields') is None or "message_visibility_limit" in kwargs['update_fields']:
cache_delete(realm_first_visible_message_id_cache_key(realm))
if realm.deactivated or (kwargs["update_fields"] is not None and
"string_id" in kwargs['update_fields']):
cache_delete(realm_user_dicts_cache_key(realm.id))
cache_delete(active_user_ids_cache_key(realm.id))
cache_delete(bot_dicts_in_realm_cache_key(realm))
cache_delete(realm_alert_words_cache_key(realm))
cache_delete(active_non_guest_user_ids_cache_key(realm.id))
def realm_alert_words_cache_key(realm: 'Realm') -> str:
return "realm_alert_words:%s" % (realm.string_id,)
def realm_first_visible_message_id_cache_key(realm: 'Realm') -> str:
return u"realm_first_visible_message_id:%s" % (realm.string_id,)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender: Any, **kwargs: Any) -> None:
from zerver.models import UserProfile
stream = kwargs['instance']
items_for_remote_cache = {}
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm_id)] = (stream,)
cache_set_many(items_for_remote_cache)
    if (kwargs.get('update_fields') is None or 'name' in kwargs['update_fields']) and \
            UserProfile.objects.filter(
                Q(default_sending_stream=stream) |
                Q(default_events_register_stream=stream)).exists():
cache_delete(bot_dicts_in_realm_cache_key(stream.realm))
def to_dict_cache_key_id(message_id: int) -> str:
return 'message_dict:%d' % (message_id,)
def to_dict_cache_key(message: 'Message') -> str:
return to_dict_cache_key_id(message.id)
def flush_message(sender: Any, **kwargs: Any) -> None:
message = kwargs['instance']
cache_delete(to_dict_cache_key_id(message.id))
def flush_submessage(sender: Any, **kwargs: Any) -> None:
submessage = kwargs['instance']
# submessages are not cached directly, they are part of their
# parent messages
message_id = submessage.message_id
cache_delete(to_dict_cache_key_id(message_id))
DECORATOR = Callable[[Callable[..., Any]], Callable[..., Any]]
def ignore_unhashable_lru_cache(maxsize: int=128, typed: bool=False) -> DECORATOR:
"""
This is a wrapper over lru_cache function. It adds following features on
top of lru_cache:
* It will not cache result of functions with unhashable arguments.
* It will clear cache whenever zerver.lib.cache.KEY_PREFIX changes.
"""
internal_decorator = lru_cache(maxsize=maxsize, typed=typed)
def decorator(user_function: Callable[..., Any]) -> Callable[..., Any]:
if settings.DEVELOPMENT and not settings.TEST_SUITE: # nocoverage
# In the development environment, we want every file
# change to refresh the source files from disk.
return user_function
cache_enabled_user_function = internal_decorator(user_function)
def wrapper(*args: Any, **kwargs: Any) -> Any:
if not hasattr(cache_enabled_user_function, 'key_prefix'):
cache_enabled_user_function.key_prefix = KEY_PREFIX
if cache_enabled_user_function.key_prefix != KEY_PREFIX:
# Clear cache when cache.KEY_PREFIX changes. This is used in
# tests.
cache_enabled_user_function.cache_clear()
cache_enabled_user_function.key_prefix = KEY_PREFIX
try:
return cache_enabled_user_function(*args, **kwargs)
except TypeError:
# args or kwargs contains an element which is unhashable. In
# this case we don't cache the result.
pass
            # Deliberately calling this function from outside of the
            # exception handler to get a more descriptive traceback.
            # Otherwise the traceback can include the exception from
            # cache_enabled_user_function as well.
return user_function(*args, **kwargs)
setattr(wrapper, 'cache_info', cache_enabled_user_function.cache_info)
setattr(wrapper, 'cache_clear', cache_enabled_user_function.cache_clear)
return wrapper
return decorator
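# A minimal usage sketch for ignore_unhashable_lru_cache; the decorated
# function below is hypothetical. Hashable arguments are memoized, while
# unhashable arguments fall through to a plain call (see the TypeError
# handler above).
#
#     @ignore_unhashable_lru_cache(maxsize=16)
#     def render(key: Any) -> str:
#         return str(key)
#
#     render('alice')        # computed, then cached
#     render('alice')        # served from the cache
#     render(['a', 'list'])  # unhashable: computed on every call
#     render.cache_info()    # attached via setattr() in the wrapper above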
| [
"str",
"Optional[str]",
"Callable[..., str]",
"Callable[..., ReturnT]",
"Any",
"Any",
"Callable[..., str]",
"Callable[..., ReturnT]",
"Any",
"Any",
"str",
"Any",
"str",
"List[str]",
"Dict[str, Any]",
"str",
"Iterable[str]",
"CompressedItemT",
"ItemT",
"ItemT",
"ItemT",
"Callable[[ObjKT], str]",
"Callable[[List[ObjKT]], Iterable[Any]]",
"Iterable[ObjKT]",
"Callable[..., ReturnT]",
"Any",
"Any",
"str",
"int",
"str",
"str",
"int",
"str",
"'Realm'",
"str",
"int",
"str",
"int",
"int",
"int",
"'Realm'",
"str",
"int",
"Iterable['UserProfile']",
"'UserProfile'",
"Any",
"Any",
"List[str]",
"Any",
"Any",
"'Realm'",
"'Realm'",
"Any",
"Any",
"int",
"'Message'",
"Any",
"Any",
"Any",
"Any",
"Callable[..., Any]",
"Any",
"Any"
] | [
3257,
3596,
3747,
4137,
4248,
4263,
4586,
5099,
5208,
5223,
6120,
6130,
6403,
6648,
6981,
7351,
7549,
7928,
8059,
8194,
8343,
9194,
9242,
9302,
10846,
11176,
11191,
11458,
11572,
11686,
12026,
12041,
12169,
12181,
12286,
12414,
12531,
12872,
12973,
13083,
13593,
13698,
13713,
13886,
14497,
14992,
15007,
15131,
16779,
16794,
17689,
17814,
18010,
18025,
18613,
18703,
18793,
18808,
18936,
18951,
19693,
20069,
20084
] | [
3260,
3609,
3765,
4159,
4251,
4266,
4604,
5121,
5211,
5226,
6123,
6133,
6406,
6657,
6995,
7354,
7562,
7943,
8064,
8199,
8348,
9216,
9280,
9317,
10868,
11179,
11194,
11461,
11575,
11689,
12029,
12044,
12172,
12188,
12289,
12417,
12534,
12875,
12976,
13086,
13600,
13701,
13716,
13909,
14510,
14995,
15010,
15140,
16782,
16797,
17696,
17821,
18013,
18028,
18616,
18712,
18796,
18811,
18939,
18954,
19711,
20072,
20087
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/cache_helpers.py | # See https://zulip.readthedocs.io/en/latest/subsystems/caching.html for docs
from typing import Any, Callable, Dict, List, Tuple
import datetime
import logging
# This file needs to be different from cache.py because cache.py
# cannot import anything from zerver.models or we'd have an import
# loop
from analytics.models import RealmCount
from django.conf import settings
from zerver.models import Message, UserProfile, Stream, get_stream_cache_key, \
Recipient, get_recipient_cache_key, Client, get_client_cache_key, \
Huddle, huddle_hash_cache_key
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_api_key_cache_key, \
user_profile_cache_key, get_remote_cache_time, get_remote_cache_requests, \
cache_set_many, to_dict_cache_key_id
from zerver.lib.message import MessageDict
from zerver.lib.users import get_all_api_keys
from importlib import import_module
from django.contrib.sessions.models import Session
from django.db.models import Q
from django.utils.timezone import now as timezone_now
MESSAGE_CACHE_SIZE = 75000
def message_fetch_objects() -> List[Any]:
try:
max_id = Message.objects.only('id').order_by("-id")[0].id
except IndexError:
return []
return Message.objects.select_related().filter(~Q(sender__email='tabbott/extra@mit.edu'),
id__gt=max_id - MESSAGE_CACHE_SIZE)
def message_cache_items(items_for_remote_cache: Dict[str, Tuple[bytes]],
message: Message) -> None:
'''
Note: this code is untested, and the caller has been
commented out for a while.
'''
key = to_dict_cache_key_id(message.id)
value = MessageDict.to_dict_uncached(message)
items_for_remote_cache[key] = (value,)
def user_cache_items(items_for_remote_cache: Dict[str, Tuple[UserProfile]],
user_profile: UserProfile) -> None:
for api_key in get_all_api_keys(user_profile):
items_for_remote_cache[user_profile_by_api_key_cache_key(api_key)] = (user_profile,)
items_for_remote_cache[user_profile_cache_key(user_profile.email, user_profile.realm)] = (user_profile,)
# We have other user_profile caches, but none of them are on the
# core serving path for lots of requests.
def stream_cache_items(items_for_remote_cache: Dict[str, Tuple[Stream]],
stream: Stream) -> None:
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm_id)] = (stream,)
def client_cache_items(items_for_remote_cache: Dict[str, Tuple[Client]],
client: Client) -> None:
items_for_remote_cache[get_client_cache_key(client.name)] = (client,)
def huddle_cache_items(items_for_remote_cache: Dict[str, Tuple[Huddle]],
huddle: Huddle) -> None:
items_for_remote_cache[huddle_hash_cache_key(huddle.huddle_hash)] = (huddle,)
def recipient_cache_items(items_for_remote_cache: Dict[str, Tuple[Recipient]],
recipient: Recipient) -> None:
items_for_remote_cache[get_recipient_cache_key(recipient.type, recipient.type_id)] = (recipient,)
session_engine = import_module(settings.SESSION_ENGINE)
def session_cache_items(items_for_remote_cache: Dict[str, str],
session: Session) -> None:
if settings.SESSION_ENGINE != "django.contrib.sessions.backends.cached_db":
        # If we're not using the cached_db session engine, there
        # will be no store.cache_key attribute, and in any case we
        # don't need to fill the cache, since it won't exist.
return
store = session_engine.SessionStore(session_key=session.session_key) # type: ignore # import_module
items_for_remote_cache[store.cache_key] = store.decode(session.session_data)
def get_active_realm_ids() -> List[int]:
"""For servers like zulipchat.com with a lot of realms, it only makes
sense to do cache-filling work for realms that have any currently
active users/clients. Otherwise, we end up with every single-user
trial organization that has ever been created costing us N streams
worth of cache work (where N is the number of default streams for
a new organization).
"""
date = timezone_now() - datetime.timedelta(days=2)
return RealmCount.objects.filter(
end_time__gte=date,
property="1day_actives::day",
value__gt=0).distinct("realm_id").values_list("realm_id", flat=True)
def get_streams() -> List[Stream]:
return Stream.objects.select_related().filter(
realm__in=get_active_realm_ids()).exclude(
# We filter out Zephyr realms, because they can easily
# have 10,000s of streams with only 1 subscriber.
is_in_zephyr_realm=True)
def get_recipients() -> List[Recipient]:
return Recipient.objects.select_related().filter(
type_id__in=get_streams().values_list("id", flat=True)) # type: ignore # Should be QuerySet above
def get_users() -> List[UserProfile]:
return UserProfile.objects.select_related().filter(
long_term_idle=False,
realm__in=get_active_realm_ids())
# Format is (objects query, items filler function, timeout, batch size)
#
# The objects queries are put inside lambdas to prevent Django from
# doing any setup for things we're unlikely to use (without the lambda
# wrapper the below adds an extra 3ms or so to startup time for
# anything importing this file).
cache_fillers = {
'user': (get_users, user_cache_items, 3600*24*7, 10000),
'client': (lambda: Client.objects.select_related().all(), client_cache_items, 3600*24*7, 10000),
'recipient': (get_recipients, recipient_cache_items, 3600*24*7, 10000),
'stream': (get_streams, stream_cache_items, 3600*24*7, 10000),
# Message cache fetching disabled until we can fix the fact that it
# does a bunch of inefficient memcached queries as part of filling
# the display_recipient cache
# 'message': (message_fetch_objects, message_cache_items, 3600 * 24, 1000),
'huddle': (lambda: Huddle.objects.select_related().all(), huddle_cache_items, 3600*24*7, 10000),
'session': (lambda: Session.objects.all(), session_cache_items, 3600*24*7, 10000),
} # type: Dict[str, Tuple[Callable[[], List[Any]], Callable[[Dict[str, Any], Any], None], int, int]]
def fill_remote_cache(cache: str) -> None:
remote_cache_time_start = get_remote_cache_time()
remote_cache_requests_start = get_remote_cache_requests()
items_for_remote_cache = {} # type: Dict[str, Any]
(objects, items_filler, timeout, batch_size) = cache_fillers[cache]
count = 0
for obj in objects():
items_filler(items_for_remote_cache, obj)
count += 1
if (count % batch_size == 0):
cache_set_many(items_for_remote_cache, timeout=3600*24)
items_for_remote_cache = {}
cache_set_many(items_for_remote_cache, timeout=3600*24*7)
logging.info("Successfully populated %s cache! Consumed %s remote cache queries (%s time)" %
(cache, get_remote_cache_requests() - remote_cache_requests_start,
round(get_remote_cache_time() - remote_cache_time_start, 2)))
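# A hedged sketch of how the cache_fillers registry is consumed (e.g. from
# a management command; the loop below is illustrative, not the actual
# caller):
#
#     for cache_name in ['user', 'client', 'stream']:
#         fill_remote_cache(cache_name)
#
# Each filler walks its objects() query, batching writes to memcached via
# cache_set_many() every `batch_size` items.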
| [
"Dict[str, Tuple[bytes]]",
"Message",
"Dict[str, Tuple[UserProfile]]",
"UserProfile",
"Dict[str, Tuple[Stream]]",
"Stream",
"Dict[str, Tuple[Client]]",
"Client",
"Dict[str, Tuple[Huddle]]",
"Huddle",
"Dict[str, Tuple[Recipient]]",
"Recipient",
"Dict[str, str]",
"Session",
"str"
] | [
1460,
1518,
1822,
1888,
2326,
2383,
2539,
2596,
2735,
2792,
2942,
3008,
3235,
3284,
6336
] | [
1483,
1525,
1851,
1899,
2350,
2389,
2563,
2602,
2759,
2798,
2969,
3017,
3249,
3291,
6339
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/camo.py | from django.conf import settings
import codecs
import hashlib
import hmac
def generate_camo_url(url: str) -> str:
encoded_url = url.encode("utf-8")
encoded_camo_key = settings.CAMO_KEY.encode("utf-8")
digest = hmac.new(encoded_camo_key, encoded_url, hashlib.sha1).hexdigest()
hex_encoded_url = codecs.encode(encoded_url, "hex") # type: ignore # https://github.com/python/typeshed/issues/300
return "%s/%s" % (digest, hex_encoded_url.decode("utf-8"))
# Encodes the provided URL using the same algorithm used by the Camo
# caching HTTPS image proxy.
def get_camo_url(url: str) -> str:
# Only encode the url if Camo is enabled
if settings.CAMO_URI == '':
return url
return "%s%s" % (settings.CAMO_URI, generate_camo_url(url))
| [
"str",
"str"
] | [
102,
592
] | [
105,
595
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/ccache.py | from typing import Any, Dict, List, Optional
# This file is adapted from samples/shellinabox/ssh-krb-wrapper in
# https://github.com/davidben/webathena, which has the following
# license:
#
# Copyright (c) 2013 David Benjamin and Alan Huang
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import struct
from typing import Union
def force_bytes(s: Union[str, bytes], encoding: str='utf-8') -> bytes:
"""converts a string to binary string"""
if isinstance(s, bytes):
return s
elif isinstance(s, str):
return s.encode(encoding)
else:
raise TypeError("force_bytes expects a string type")
# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as
# there is already an ASN.1 implementation, but in the interest of
# limiting MIT Kerberos's exposure to malformed ccaches, encode it
# ourselves. To that end, here's the laziest DER encoder ever.
def der_encode_length(length: int) -> bytes:
if length <= 127:
return struct.pack('!B', length)
out = b""
while length > 0:
out = struct.pack('!B', length & 0xff) + out
length >>= 8
out = struct.pack('!B', len(out) | 0x80) + out
return out
def der_encode_tlv(tag: int, value: bytes) -> bytes:
return struct.pack('!B', tag) + der_encode_length(len(value)) + value
def der_encode_integer_value(val: int) -> bytes:
if not isinstance(val, int):
raise TypeError("int")
# base 256, MSB first, two's complement, minimum number of octets
# necessary. This has a number of annoying edge cases:
# * 0 and -1 are 0x00 and 0xFF, not the empty string.
# * 255 is 0x00 0xFF, not 0xFF
# * -256 is 0xFF 0x00, not 0x00
# Special-case to avoid an empty encoding.
if val == 0:
return b"\x00"
sign = 0 # What you would get if you sign-extended the current high bit.
out = b""
# We can stop once sign-extension matches the remaining value.
while val != sign:
byte = val & 0xff
out = struct.pack('!B', byte) + out
sign = -1 if byte & 0x80 == 0x80 else 0
val >>= 8
return out
def der_encode_integer(val: int) -> bytes:
return der_encode_tlv(0x02, der_encode_integer_value(val))
def der_encode_int32(val: int) -> bytes:
if val < -2147483648 or val > 2147483647:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_uint32(val: int) -> bytes:
if val < 0 or val > 4294967295:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_string(val: str) -> bytes:
if not isinstance(val, str):
raise TypeError("unicode")
return der_encode_tlv(0x1b, val.encode("utf-8"))
def der_encode_octet_string(val: bytes) -> bytes:
if not isinstance(val, bytes):
raise TypeError("bytes")
return der_encode_tlv(0x04, val)
def der_encode_sequence(tlvs: List[Optional[bytes]], tagged: Optional[bool]=True) -> bytes:
body = []
for i, tlv in enumerate(tlvs):
# Missing optional elements represented as None.
if tlv is None:
continue
if tagged:
# Assume kerberos-style explicit tagging of components.
tlv = der_encode_tlv(0xa0 | i, tlv)
body.append(tlv)
return der_encode_tlv(0x30, b"".join(body))
def der_encode_ticket(tkt: Dict[str, Any]) -> bytes:
return der_encode_tlv(
0x61, # Ticket
der_encode_sequence(
[der_encode_integer(5), # tktVno
der_encode_string(tkt["realm"]),
der_encode_sequence( # PrincipalName
[der_encode_int32(tkt["sname"]["nameType"]),
der_encode_sequence([der_encode_string(c)
for c in tkt["sname"]["nameString"]],
tagged=False)]),
der_encode_sequence( # EncryptedData
[der_encode_int32(tkt["encPart"]["etype"]),
(der_encode_uint32(tkt["encPart"]["kvno"])
if "kvno" in tkt["encPart"]
else None),
der_encode_octet_string(
base64.b64decode(tkt["encPart"]["cipher"]))])]))
# Kerberos ccache writing code. Using format documentation from here:
# http://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
def ccache_counted_octet_string(data: bytes) -> bytes:
if not isinstance(data, bytes):
raise TypeError("bytes")
return struct.pack("!I", len(data)) + data
def ccache_principal(name: Dict[str, str], realm: str) -> bytes:
header = struct.pack("!II", name["nameType"], len(name["nameString"]))
return (header + ccache_counted_octet_string(force_bytes(realm)) +
b"".join(ccache_counted_octet_string(force_bytes(c))
for c in name["nameString"]))
def ccache_key(key: Dict[str, str]) -> bytes:
return (struct.pack("!H", key["keytype"]) +
ccache_counted_octet_string(base64.b64decode(key["keyvalue"])))
def flags_to_uint32(flags: List[str]) -> int:
ret = 0
for i, v in enumerate(flags):
if v:
ret |= 1 << (31 - i)
return ret
def ccache_credential(cred: Dict[str, Any]) -> bytes:
out = ccache_principal(cred["cname"], cred["crealm"])
out += ccache_principal(cred["sname"], cred["srealm"])
out += ccache_key(cred["key"])
out += struct.pack("!IIII",
cred["authtime"] // 1000,
cred.get("starttime", cred["authtime"]) // 1000,
cred["endtime"] // 1000,
cred.get("renewTill", 0) // 1000)
out += struct.pack("!B", 0)
out += struct.pack("!I", flags_to_uint32(cred["flags"]))
# TODO: Care about addrs or authdata? Former is "caddr" key.
out += struct.pack("!II", 0, 0)
out += ccache_counted_octet_string(der_encode_ticket(cred["ticket"]))
# No second_ticket.
out += ccache_counted_octet_string(b"")
return out
def make_ccache(cred: Dict[str, Any]) -> bytes:
# Do we need a DeltaTime header? The ccache I get just puts zero
# in there, so do the same.
out = struct.pack("!HHHHII",
0x0504, # file_format_version
12, # headerlen
1, # tag (DeltaTime)
8, # taglen (two uint32_ts)
0, 0, # time_offset / usec_offset
)
out += ccache_principal(cred["cname"], cred["crealm"])
out += ccache_credential(cred)
return out
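# A few worked examples of the DER integer encoding above, following the
# edge cases documented in der_encode_integer_value:
#
#     der_encode_integer(0)    == b"\x02\x01\x00"
#     der_encode_integer(127)  == b"\x02\x01\x7f"
#     der_encode_integer(255)  == b"\x02\x02\x00\xff"  # leading 0x00 keeps it positive
#     der_encode_integer(-128) == b"\x02\x01\x80"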
| [
"Union[str, bytes]",
"int",
"int",
"bytes",
"int",
"int",
"int",
"int",
"str",
"bytes",
"List[Optional[bytes]]",
"Dict[str, Any]",
"bytes",
"Dict[str, str]",
"str",
"Dict[str, str]",
"List[str]",
"Dict[str, Any]",
"Dict[str, Any]"
] | [
1377,
2091,
2370,
2382,
2508,
3295,
3399,
3560,
3712,
3882,
4035,
4484,
5561,
5722,
5745,
6043,
6221,
6377,
7187
] | [
1394,
2094,
2373,
2387,
2511,
3298,
3402,
3563,
3715,
3887,
4056,
4498,
5566,
5736,
5748,
6057,
6230,
6391,
7201
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/context_managers.py | """
Context managers, i.e. things you can use with the 'with' statement.
"""
import fcntl
from contextlib import contextmanager
from typing import Iterator, IO, Any, Union
@contextmanager
def flock(lockfile: Union[int, IO[Any]], shared: bool=False) -> Iterator[None]:
"""Lock a file object using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX."""
fcntl.flock(lockfile, fcntl.LOCK_SH if shared else fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(lockfile, fcntl.LOCK_UN)
@contextmanager
def lockfile(filename: str, shared: bool=False) -> Iterator[None]:
"""Lock a file using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX.
The file is given by name and will be created if it does not exist."""
with open(filename, 'w') as lock:
with flock(lock, shared=shared):
yield
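# A minimal usage sketch (the lock path is hypothetical): only one process
# can hold the exclusive lock at a time; a second caller blocks in flock(2)
# until the first leaves the `with` block.
#
#     with lockfile("/var/lock/example.lock"):
#         pass  # critical section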
| [
"Union[int, IO[Any]]",
"str"
] | [
211,
613
] | [
230,
616
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/create_user.py |
from django.contrib.auth.models import UserManager
from django.utils.timezone import now as timezone_now
from zerver.models import UserProfile, Recipient, Subscription, Realm, Stream
from zerver.lib.upload import copy_avatar
from zerver.lib.hotspots import copy_hotpots
from zerver.lib.utils import generate_api_key
import base64
import ujson
import os
import string
from typing import Optional
def copy_user_settings(source_profile: UserProfile, target_profile: UserProfile) -> None:
"""Warning: Does not save, to avoid extra database queries"""
for settings_name in UserProfile.property_types:
value = getattr(source_profile, settings_name)
setattr(target_profile, settings_name, value)
for settings_name in UserProfile.notification_setting_types:
value = getattr(source_profile, settings_name)
setattr(target_profile, settings_name, value)
setattr(target_profile, "full_name", source_profile.full_name)
setattr(target_profile, "enter_sends", source_profile.enter_sends)
target_profile.save()
if source_profile.avatar_source == UserProfile.AVATAR_FROM_USER:
from zerver.lib.actions import do_change_avatar_fields
do_change_avatar_fields(target_profile, UserProfile.AVATAR_FROM_USER)
copy_avatar(source_profile, target_profile)
copy_hotpots(source_profile, target_profile)
# create_user_profile is based on Django's User.objects.create_user,
# except that we don't save to the database so it can be used in
# bulk_creates
#
# Only use this for bulk_create -- for normal usage one should use
# create_user (below) which will also make the Subscription and
# Recipient objects
def create_user_profile(realm: Realm, email: str, password: Optional[str],
active: bool, bot_type: Optional[int], full_name: str,
short_name: str, bot_owner: Optional[UserProfile],
is_mirror_dummy: bool, tos_version: Optional[str],
timezone: Optional[str],
tutorial_status: Optional[str] = UserProfile.TUTORIAL_WAITING,
enter_sends: bool = False) -> UserProfile:
now = timezone_now()
email = UserManager.normalize_email(email)
user_profile = UserProfile(email=email, is_staff=False, is_active=active,
full_name=full_name, short_name=short_name,
last_login=now, date_joined=now, realm=realm,
pointer=-1, is_bot=bool(bot_type), bot_type=bot_type,
bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,
tos_version=tos_version, timezone=timezone,
tutorial_status=tutorial_status,
enter_sends=enter_sends,
onboarding_steps=ujson.dumps([]),
default_language=realm.default_language,
twenty_four_hour_time=realm.default_twenty_four_hour_time,
delivery_email=email)
if bot_type or not active:
password = None
user_profile.set_password(password)
user_profile.api_key = generate_api_key()
return user_profile
def create_user(email: str, password: Optional[str], realm: Realm,
full_name: str, short_name: str, active: bool = True,
is_realm_admin: bool = False, bot_type: Optional[int] = None,
bot_owner: Optional[UserProfile] = None,
tos_version: Optional[str] = None, timezone: str = "",
avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
is_mirror_dummy: bool = False,
default_sending_stream: Optional[Stream] = None,
default_events_register_stream: Optional[Stream] = None,
default_all_public_streams: Optional[bool] = None,
source_profile: Optional[UserProfile] = None) -> UserProfile:
user_profile = create_user_profile(realm, email, password, active, bot_type,
full_name, short_name, bot_owner,
is_mirror_dummy, tos_version, timezone)
user_profile.is_realm_admin = is_realm_admin
user_profile.avatar_source = avatar_source
user_profile.timezone = timezone
user_profile.default_sending_stream = default_sending_stream
user_profile.default_events_register_stream = default_events_register_stream
# Allow the ORM default to be used if not provided
if default_all_public_streams is not None:
user_profile.default_all_public_streams = default_all_public_streams
# If a source profile was specified, we copy settings from that
# user. Note that this is positioned in a way that overrides
# other arguments passed in, which is correct for most defaults
# like timezone where the source profile likely has a better value
# than the guess. As we decide on details like avatars and full
# names for this feature, we may want to move it.
if source_profile is not None:
# copy_user_settings saves the attribute values so a secondary
# save is not required.
copy_user_settings(source_profile, user_profile)
else:
user_profile.save()
recipient = Recipient.objects.create(type_id=user_profile.id,
type=Recipient.PERSONAL)
Subscription.objects.create(user_profile=user_profile, recipient=recipient)
return user_profile
| [
"UserProfile",
"UserProfile",
"Realm",
"str",
"Optional[str]",
"bool",
"Optional[int]",
"str",
"str",
"Optional[UserProfile]",
"bool",
"Optional[str]",
"Optional[str]",
"str",
"Optional[str]",
"Realm",
"str",
"str"
] | [
438,
467,
1701,
1715,
1730,
1777,
1793,
1819,
1860,
1876,
1940,
1959,
2008,
3317,
3332,
3354,
3388,
3405
] | [
449,
478,
1706,
1718,
1743,
1781,
1806,
1822,
1863,
1897,
1944,
1972,
2021,
3320,
3345,
3359,
3391,
3408
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/db.py |
import time
from psycopg2.extensions import cursor, connection
from typing import Callable, Optional, Iterable, Any, Dict, List, Union, TypeVar, \
Mapping
from zerver.lib.str_utils import NonBinaryStr
CursorObj = TypeVar('CursorObj', bound=cursor)
ParamsT = Union[Iterable[Any], Mapping[str, Any]]
# Similar to the tracking done in Django's CursorDebugWrapper, but done at the
# psycopg2 cursor level so it works with SQLAlchemy.
def wrapper_execute(self: CursorObj,
action: Callable[[NonBinaryStr, Optional[ParamsT]], CursorObj],
sql: NonBinaryStr,
params: Optional[ParamsT]=()) -> CursorObj:
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
self.connection.queries.append({
'time': "%.3f" % duration,
})
class TimeTrackingCursor(cursor):
"""A psycopg2 cursor class that tracks the time spent executing queries."""
def execute(self, query: NonBinaryStr,
vars: Optional[ParamsT]=None) -> 'TimeTrackingCursor':
return wrapper_execute(self, super().execute, query, vars)
def executemany(self, query: NonBinaryStr,
vars: Iterable[Any]) -> 'TimeTrackingCursor':
return wrapper_execute(self, super().executemany, query, vars)
class TimeTrackingConnection(connection):
"""A psycopg2 connection class that uses TimeTrackingCursors."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.queries = [] # type: List[Dict[str, str]]
super().__init__(*args, **kwargs)
def cursor(self, *args: Any, **kwargs: Any) -> TimeTrackingCursor:
kwargs.setdefault('cursor_factory', TimeTrackingCursor)
return connection.cursor(self, *args, **kwargs)
def reset_queries() -> None:
from django.db import connections
for conn in connections.all():
if conn.connection is not None:
conn.connection.queries = []
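# A hedged sketch of using TimeTrackingConnection with raw psycopg2 (Zulip
# itself wires this up through Django's DATABASES setting; the DSN below is
# hypothetical):
#
#     import psycopg2
#     conn = psycopg2.connect("dbname=zulip", connection_factory=TimeTrackingConnection)
#     cur = conn.cursor()                # a TimeTrackingCursor
#     cur.execute("SELECT 1")
#     print(conn.queries)                # e.g. [{'time': '0.001'}]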
| [
"CursorObj",
"Callable[[NonBinaryStr, Optional[ParamsT]], CursorObj]",
"NonBinaryStr",
"NonBinaryStr",
"NonBinaryStr",
"Iterable[Any]",
"Any",
"Any",
"Any",
"Any"
] | [
464,
503,
584,
1038,
1224,
1264,
1518,
1533,
1674,
1689
] | [
473,
557,
596,
1050,
1236,
1277,
1521,
1536,
1677,
1692
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/debug.py |
import code
import gc
import logging
import os
import signal
import socket
import threading
import traceback
import tracemalloc
from types import FrameType
from django.conf import settings
from django.utils.timezone import now as timezone_now
from typing import Optional
logger = logging.getLogger('zulip.debug')
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig: int, frame: FrameType) -> None:
"""Interrupt running process, and provide a python prompt for
interactive debugging."""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i = code.InteractiveConsole(d)
i.interact(message)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen() -> None:
signal.signal(signal.SIGUSR1, lambda sig, stack: traceback.print_stack(stack))
signal.signal(signal.SIGUSR2, interactive_debug)
def tracemalloc_dump() -> None:
if not tracemalloc.is_tracing():
logger.warning("pid {}: tracemalloc off, nothing to dump"
.format(os.getpid()))
return
# Despite our name for it, `timezone_now` always deals in UTC.
basename = "snap.{}.{}".format(os.getpid(),
timezone_now().strftime("%F-%T"))
path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)
gc.collect()
tracemalloc.take_snapshot().dump(path)
procstat = open('/proc/{}/stat'.format(os.getpid()), 'rb').read().split()
rss_pages = int(procstat[23])
logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
.format(tracemalloc.get_traced_memory()[0] // 1048576,
tracemalloc.get_traced_memory()[1] // 1048576,
tracemalloc.get_tracemalloc_memory() // 1048576,
rss_pages // 256,
basename))
def tracemalloc_listen_sock(sock: socket.socket) -> None:
logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
while True:
sock.recv(1)
tracemalloc_dump()
listener_pid = None # type: Optional[int]
def tracemalloc_listen() -> None:
global listener_pid
if listener_pid == os.getpid():
# Already set up -- and in this process, not just its parent.
return
logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
listener_pid = os.getpid()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
path = "/tmp/tracemalloc.{}".format(os.getpid())
sock.bind(path)
thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
daemon=True)
thread.start()
logger.debug('pid {}: tracemalloc_listen done: {}'.format(
os.getpid(), path))
def maybe_tracemalloc_listen() -> None:
'''If tracemalloc tracing enabled, listen for requests to dump a snapshot.
To trigger once this is listening:
echo | socat -u stdin unix-sendto:/tmp/tracemalloc.$pid
To enable in the Zulip web server: edit /etc/zulip/uwsgi.ini ,
and add e.g. ` PYTHONTRACEMALLOC=5` to the `env=` line.
This function is called in middleware, so the process will
automatically start listening.
To enable in other contexts: see upstream docs
https://docs.python.org/3/library/tracemalloc .
You may also have to add a call to this function somewhere.
'''
if os.environ.get('PYTHONTRACEMALLOC'):
# If the server was started with `tracemalloc` tracing on, then
# listen for a signal to dump `tracemalloc` snapshots.
tracemalloc_listen()
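# A minimal sketch of triggering a tracemalloc dump from Python instead of
# socat (the pid is hypothetical; the socket path matches the one bound in
# tracemalloc_listen above):
#
#     import socket
#     sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
#     sock.sendto(b"\n", "/tmp/tracemalloc.12345")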
| [
"int",
"FrameType",
"socket.socket"
] | [
642,
654,
2488
] | [
645,
663,
2501
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/digest.py | from typing import Any, Callable, Dict, Iterable, List, Set, Tuple
from collections import defaultdict
import datetime
import logging
import pytz
from django.db.models import Q, QuerySet
from django.template import loader
from django.conf import settings
from django.utils.timezone import now as timezone_now
from confirmation.models import one_click_unsubscribe_link
from zerver.lib.notifications import build_message_list
from zerver.lib.send_email import send_future_email, FromAddress
from zerver.lib.url_encoding import encode_stream
from zerver.models import UserProfile, UserMessage, Recipient, Stream, \
Subscription, UserActivity, get_active_streams, get_user_profile_by_id, \
Realm, Message
from zerver.context_processors import common_context
from zerver.lib.queue import queue_json_publish
from zerver.lib.logging_util import log_to_file
logger = logging.getLogger(__name__)
log_to_file(logger, settings.DIGEST_LOG_PATH)
VALID_DIGEST_DAY = 1 # Tuesdays
DIGEST_CUTOFF = 5
# Digests accumulate 4 types of interesting traffic for a user:
# 1. Missed PMs
# 2. New streams
# 3. New users
# 4. Interesting stream traffic, as determined by the longest and most
#    diversely commented-upon topics.
def inactive_since(user_profile: UserProfile, cutoff: datetime.datetime) -> bool:
# Hasn't used the app in the last DIGEST_CUTOFF (5) days.
most_recent_visit = [row.last_visit for row in
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def should_process_digest(realm_str: str) -> bool:
if realm_str in settings.SYSTEM_ONLY_REALMS:
# Don't try to send emails to system-only realms
return False
return True
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile: UserProfile, cutoff: datetime.datetime) -> None:
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event)
def enqueue_emails(cutoff: datetime.datetime) -> None:
if not settings.SEND_DIGEST_EMAILS:
return
if timezone_now().weekday() != VALID_DIGEST_DAY:
return
for realm in Realm.objects.filter(deactivated=False, digest_emails_enabled=True):
if not should_process_digest(realm.string_id):
continue
user_profiles = UserProfile.objects.filter(
realm=realm, is_active=True, is_bot=False, enable_digest_emails=True)
for user_profile in user_profiles:
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
logger.info("%s is inactive, queuing for potential digest" % (
user_profile.email,))
def gather_hot_conversations(user_profile: UserProfile, stream_ums: QuerySet) -> List[Dict[str, Any]]:
# Gather stream conversations of 2 types:
# 1. long conversations
# 2. conversations where many different people participated
#
# Returns a list of dictionaries containing the templating
# information for each hot conversation.
# stream_ums is a list of UserMessage rows for a single
# user, so the list of messages is distinct here.
messages = [um.message for um in stream_ums]
conversation_length = defaultdict(int) # type: Dict[Tuple[int, str], int]
conversation_messages = defaultdict(list) # type: Dict[Tuple[int, str], List[Message]]
conversation_diversity = defaultdict(set) # type: Dict[Tuple[int, str], Set[str]]
for message in messages:
key = (message.recipient.type_id,
message.topic_name())
conversation_messages[key].append(message)
if not message.sent_by_human():
# Don't include automated messages in the count.
continue
conversation_diversity[key].add(
message.sender.full_name)
conversation_length[key] += 1
diversity_list = list(conversation_diversity.items())
diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)
length_list = list(conversation_length.items())
length_list.sort(key=lambda entry: entry[1], reverse=True)
# Get up to the 4 best conversations from the diversity list
# and length list, filtering out overlapping conversations.
hot_conversations = [elt[0] for elt in diversity_list[:2]]
for candidate, _ in length_list:
if candidate not in hot_conversations:
hot_conversations.append(candidate)
if len(hot_conversations) >= 4:
break
    # If there was so much overlap between the diversity and length lists
    # that we still have < 4 conversations, use the remaining diversity
    # items to pad out the hot conversations.
num_convos = len(hot_conversations)
if num_convos < 4:
hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]])
hot_conversation_render_payloads = []
for h in hot_conversations:
users = list(conversation_diversity[h])
count = conversation_length[h]
messages = conversation_messages[h]
# We'll display up to 2 messages from the conversation.
first_few_messages = messages[:2]
teaser_data = {"participants": users,
"count": count - len(first_few_messages),
"first_few_messages": build_message_list(
user_profile, first_few_messages)}
hot_conversation_render_payloads.append(teaser_data)
return hot_conversation_render_payloads
def gather_new_users(user_profile: UserProfile, threshold: datetime.datetime) -> Tuple[int, List[str]]:
# Gather information on users in the realm who have recently
# joined.
if not user_profile.can_access_all_realm_members():
new_users = [] # type: List[UserProfile]
else:
new_users = list(UserProfile.objects.filter(
realm=user_profile.realm, date_joined__gt=threshold,
is_bot=False))
user_names = [user.full_name for user in new_users]
return len(user_names), user_names
def gather_new_streams(user_profile: UserProfile,
threshold: datetime.datetime) -> Tuple[int, Dict[str, List[str]]]:
if user_profile.can_access_public_streams():
new_streams = list(get_active_streams(user_profile.realm).filter(
invite_only=False, date_created__gt=threshold))
else:
new_streams = []
base_url = "%s/#narrow/stream/" % (user_profile.realm.uri,)
streams_html = []
streams_plain = []
for stream in new_streams:
narrow_url = base_url + encode_stream(stream.id, stream.name)
stream_link = "<a href='%s'>%s</a>" % (narrow_url, stream.name)
streams_html.append(stream_link)
streams_plain.append(stream.name)
return len(new_streams), {"html": streams_html, "plain": streams_plain}
def enough_traffic(unread_pms: str, hot_conversations: str, new_streams: int, new_users: int) -> bool:
if unread_pms or hot_conversations:
# If you have any unread traffic, good enough.
return True
if new_streams and new_users:
# If you somehow don't have any traffic but your realm did get
# new streams and users, good enough.
return True
return False
def handle_digest_email(user_profile_id: int, cutoff: float) -> None:
user_profile = get_user_profile_by_id(user_profile_id)
# We are disabling digest emails for soft deactivated users for the time.
# TODO: Find an elegant way to generate digest emails for these users.
if user_profile.long_term_idle:
return None
# Convert from epoch seconds to a datetime object.
cutoff_date = datetime.datetime.fromtimestamp(int(cutoff), tz=pytz.utc)
all_messages = UserMessage.objects.filter(
user_profile=user_profile,
message__pub_date__gt=cutoff_date
).select_related('message').order_by("message__pub_date")
context = common_context(user_profile)
# Start building email template data.
context.update({
'realm_name': user_profile.realm.name,
'name': user_profile.full_name,
'unsubscribe_link': one_click_unsubscribe_link(user_profile, "digest")
})
# Gather recent missed PMs, re-using the missed PM email logic.
# You can't have an unread message that you sent, but when testing
# this causes confusion so filter your messages out.
pms = all_messages.filter(
~Q(message__recipient__type=Recipient.STREAM) &
~Q(message__sender=user_profile))
# Show up to 4 missed PMs.
pms_limit = 4
context['unread_pms'] = build_message_list(
user_profile, [pm.message for pm in pms[:pms_limit]])
    # Count of unread PMs beyond the ones displayed, floored at zero.
    context['remaining_unread_pms_count'] = max(0, len(pms) - pms_limit)
home_view_recipients = [sub.recipient for sub in
Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=True)]
stream_messages = all_messages.filter(
message__recipient__type=Recipient.STREAM,
message__recipient__in=home_view_recipients)
# Gather hot conversations.
context["hot_conversations"] = gather_hot_conversations(
user_profile, stream_messages)
# Gather new streams.
new_streams_count, new_streams = gather_new_streams(
user_profile, cutoff_date)
context["new_streams"] = new_streams
context["new_streams_count"] = new_streams_count
# Gather users who signed up recently.
new_users_count, new_users = gather_new_users(
user_profile, cutoff_date)
context["new_users"] = new_users
# We don't want to send emails containing almost no information.
if enough_traffic(context["unread_pms"], context["hot_conversations"],
new_streams_count, new_users_count):
logger.info("Sending digest email for %s" % (user_profile.email,))
# Send now, as a ScheduledEmail
send_future_email('zerver/emails/digest', user_profile.realm, to_user_id=user_profile.id,
from_name="Zulip Digest", from_address=FromAddress.NOREPLY,
context=context)
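# A minimal sketch of the cutoff round-trip used above: queue_digest_recipient
# ships the cutoff as epoch seconds (strftime('%s') is platform-dependent but
# supported on Linux), and handle_digest_email parses it back:
#
#     cutoff = timezone_now() - datetime.timedelta(days=DIGEST_CUTOFF)
#     wire = cutoff.strftime('%s')       # e.g. '1540000000'
#     parsed = datetime.datetime.fromtimestamp(int(wire), tz=pytz.utc)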
| [
"UserProfile",
"datetime.datetime",
"str",
"UserProfile",
"datetime.datetime",
"datetime.datetime",
"UserProfile",
"QuerySet",
"UserProfile",
"datetime.datetime",
"UserProfile",
"datetime.datetime",
"str",
"str",
"int",
"int",
"int",
"float"
] | [
1251,
1272,
1731,
2035,
2056,
2305,
3078,
3103,
5876,
5900,
6419,
6466,
7217,
7241,
7259,
7275,
7634,
7647
] | [
1262,
1289,
1734,
2046,
2073,
2322,
3089,
3111,
5887,
5917,
6430,
6483,
7220,
7244,
7262,
7278,
7637,
7652
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/domains.py | from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
import re
def validate_domain(domain: str) -> None:
if domain is None or len(domain) == 0:
raise ValidationError(_("Domain can't be empty."))
if '.' not in domain:
raise ValidationError(_("Domain must have at least one dot (.)"))
if len(domain) > 255:
raise ValidationError(_("Domain is too long"))
if domain[0] == '.' or domain[-1] == '.':
raise ValidationError(_("Domain cannot start or end with a dot (.)"))
for subdomain in domain.split('.'):
if not subdomain:
raise ValidationError(_("Consecutive '.' are not allowed."))
if subdomain[0] == '-' or subdomain[-1] == '-':
raise ValidationError(_("Subdomains cannot start or end with a '-'."))
if not re.match('^[a-z0-9-]*$', subdomain):
raise ValidationError(_("Domain can only have letters, numbers, '.' and '-'s."))
| [
"str"
] | [
142
] | [
145
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/email_mirror.py | from typing import Any, Dict, List, Optional, Union
import logging
import re
from email.header import decode_header, Header
import email.message as message
from django.conf import settings
from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \
internal_send_message, internal_send_private_message, \
internal_send_stream_message, internal_send_huddle_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.queue import queue_json_publish
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_file
from zerver.lib.utils import generate_random_token
from zerver.lib.str_utils import force_text
from zerver.lib.send_email import FromAddress
from zerver.models import Stream, Recipient, \
get_user_profile_by_id, get_display_recipient, get_personal_recipient, \
Message, Realm, UserProfile, get_system_bot, get_user, MAX_TOPIC_NAME_LENGTH, \
MAX_MESSAGE_LENGTH
logger = logging.getLogger(__name__)
def redact_stream(error_message: str) -> str:
domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
stream_match = re.search('\\b(.*?)@' + domain, error_message)
if stream_match:
stream_name = stream_match.groups()[0]
return error_message.replace(stream_name, "X" * len(stream_name))
return error_message
def report_to_zulip(error_message: str) -> None:
if settings.ERROR_BOT is None:
return
error_bot = get_system_bot(settings.ERROR_BOT)
error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
send_zulip(settings.ERROR_BOT, error_stream, "email mirror error",
"""~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message: message.Message, error_message: str, debug_info: Dict[str, Any]) -> None:
scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"),
redact_stream(error_message))
if "to" in debug_info:
scrubbed_error = "Stream: %s\n%s" % (redact_stream(debug_info["to"]),
scrubbed_error)
if "stream" in debug_info:
scrubbed_error = "Realm: %s\n%s" % (debug_info["stream"].realm.string_id,
scrubbed_error)
logger.error(scrubbed_error)
report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token: str) -> str:
return 'missed_message:' + token
def is_missed_message_address(address: str) -> bool:
msg_string = get_email_gateway_message_string_from_address(address)
return is_mm_32_format(msg_string)
def is_mm_32_format(msg_string: Optional[str]) -> bool:
'''
Missed message strings are formatted with a little "mm" prefix
followed by a randomly generated 32-character string.
'''
return msg_string is not None and msg_string.startswith('mm') and len(msg_string) == 34
def get_missed_message_token_from_address(address: str) -> str:
msg_string = get_email_gateway_message_string_from_address(address)
if msg_string is None:
raise ZulipEmailForwardError('Address not recognized by gateway.')
if not is_mm_32_format(msg_string):
raise ZulipEmailForwardError('Could not parse missed message address')
# strip off the 'mm' before returning the redis key
return msg_string[2:]
def create_missed_message_address(user_profile: UserProfile, message: Message) -> str:
if settings.EMAIL_GATEWAY_PATTERN == '':
logger.warning("EMAIL_GATEWAY_PATTERN is an empty string, using "
"NOREPLY_EMAIL_ADDRESS in the 'from' field.")
return FromAddress.NOREPLY
if message.recipient.type == Recipient.PERSONAL:
# We need to reply to the sender so look up their personal recipient_id
recipient_id = get_personal_recipient(message.sender_id).id
else:
recipient_id = message.recipient_id
data = {
'user_profile_id': user_profile.id,
'recipient_id': recipient_id,
'subject': message.topic_name().encode('utf-8'),
}
while True:
token = generate_random_token(32)
key = missed_message_redis_key(token)
if redis_client.hsetnx(key, 'uses_left', 1):
break
with redis_client.pipeline() as pipeline:
pipeline.hmset(key, data)
pipeline.expire(key, 60 * 60 * 24 * 5)
pipeline.execute()
address = 'mm' + token
return settings.EMAIL_GATEWAY_PATTERN % (address,)
def mark_missed_message_address_as_used(address: str) -> None:
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
with redis_client.pipeline() as pipeline:
pipeline.hincrby(key, 'uses_left', -1)
pipeline.expire(key, 60 * 60 * 24 * 5)
new_value = pipeline.execute()[0]
if new_value < 0:
redis_client.delete(key)
raise ZulipEmailForwardError('Missed message address has already been used')
def construct_zulip_body(message: message.Message, realm: Realm) -> str:
body = extract_body(message)
    # Remove null characters, since Zulip will reject them
body = body.replace("\x00", "")
body = filter_footer(body)
body += extract_and_upload_attachments(message, realm)
body = body.strip()
if not body:
body = '(No email body)'
return body
def send_to_missed_message_address(address: str, message: message.Message) -> None:
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
if not all(val is not None for val in result):
raise ZulipEmailForwardError('Missing missed message address data')
user_profile_id, recipient_id, subject_b = result # type: (bytes, bytes, bytes)
user_profile = get_user_profile_by_id(user_profile_id)
recipient = Recipient.objects.get(id=recipient_id)
display_recipient = get_display_recipient(recipient)
body = construct_zulip_body(message, user_profile.realm)
if recipient.type == Recipient.STREAM:
assert isinstance(display_recipient, str)
recipient_str = display_recipient
internal_send_stream_message(user_profile.realm, user_profile, recipient_str,
subject_b.decode('utf-8'), body)
elif recipient.type == Recipient.PERSONAL:
assert not isinstance(display_recipient, str)
recipient_str = display_recipient[0]['email']
recipient_user = get_user(recipient_str, user_profile.realm)
internal_send_private_message(user_profile.realm, user_profile,
recipient_user, body)
elif recipient.type == Recipient.HUDDLE:
assert not isinstance(display_recipient, str)
emails = [user_dict['email'] for user_dict in display_recipient]
recipient_str = ', '.join(emails)
internal_send_huddle_message(user_profile.realm, user_profile,
emails, body)
else:
raise AssertionError("Invalid recipient type!")
logger.info("Successfully processed email from %s to %s" % (
user_profile.email, recipient_str))
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
pass
def send_zulip(sender: str, stream: Stream, topic: str, content: str) -> None:
internal_send_message(
stream.realm,
sender,
"stream",
stream.name,
topic[:MAX_TOPIC_NAME_LENGTH],
content[:MAX_MESSAGE_LENGTH],
email_gateway=True)
def valid_stream(stream_name: str, token: str) -> bool:
try:
stream = Stream.objects.get(email_token=token)
return stream.name.lower() == stream_name.lower()
except Stream.DoesNotExist:
return False
def get_message_part_by_type(message: message.Message, content_type: str) -> Optional[str]:
charsets = message.get_charsets()
for idx, part in enumerate(message.walk()):
if part.get_content_type() == content_type:
content = part.get_payload(decode=True)
assert isinstance(content, bytes)
if charsets[idx]:
return content.decode(charsets[idx], errors="ignore")
return None
talon_initialized = False
def extract_body(message: message.Message) -> str:
import talon
global talon_initialized
if not talon_initialized:
talon.init()
talon_initialized = True
# If the message contains a plaintext version of the body, use
# that.
plaintext_content = get_message_part_by_type(message, "text/plain")
if plaintext_content:
return talon.quotations.extract_from_plain(plaintext_content)
# If we only have an HTML version, try to make that look nice.
html_content = get_message_part_by_type(message, "text/html")
if html_content:
return convert_html_to_markdown(talon.quotations.extract_from_html(html_content))
raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text: str) -> str:
# Try to filter out obvious footers.
possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")]
if len(possible_footers) != 1:
# Be conservative and don't try to scrub content if there
# isn't a trivial footer structure.
return text
return text.partition("--")[0].strip()
def extract_and_upload_attachments(message: message.Message, realm: Realm) -> str:
user_profile = get_system_bot(settings.EMAIL_GATEWAY_BOT)
attachment_links = []
payload = message.get_payload()
if not isinstance(payload, list):
# This is not a multipart message, so it can't contain attachments.
return ""
for part in payload:
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
attachment = part.get_payload(decode=True)
if isinstance(attachment, bytes):
s3_url = upload_message_file(filename, len(attachment), content_type,
attachment,
user_profile,
target_realm=realm)
formatted_link = "[%s](%s)" % (filename, s3_url)
attachment_links.append(formatted_link)
else:
logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
(filename, message.get("From")))
return "\n".join(attachment_links)
def extract_and_validate(email: str) -> Stream:
temp = decode_email_address(email)
if temp is None:
raise ZulipEmailForwardError("Malformed email recipient " + email)
stream_name, token = temp
if not valid_stream(stream_name, token):
raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message: message.Message) -> str:
# We can't use Delivered-To; if there is a X-Gm-Original-To
# it is more accurate, so try to find the most-accurate
# recipient list in descending priority order
recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
recipients = [] # type: List[Union[str, Header]]
for recipient_header in recipient_headers:
r = message.get_all(recipient_header, None)
if r:
recipients = r
break
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
match_email_re = re.compile(".*?".join(pattern_parts))
for recipient_email in [str(recipient) for recipient in recipients]:
if match_email_re.match(recipient_email):
return recipient_email
raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to: str, subject: str, message: message.Message,
debug_info: Dict[str, Any]) -> None:
stream = extract_and_validate(to)
body = construct_zulip_body(message, stream.realm)
debug_info["stream"] = stream
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)
logger.info("Successfully processed email to %s (%s)" % (
stream.name, stream.realm.string_id))
def process_missed_message(to: str, message: message.Message, pre_checked: bool) -> None:
if not pre_checked:
mark_missed_message_address_as_used(to)
send_to_missed_message_address(to, message)
def process_message(message: message.Message, rcpt_to: Optional[str]=None, pre_checked: bool=False) -> None:
subject_header = str(message.get("Subject", "")).strip()
if subject_header == "":
subject_header = "(no topic)"
encoded_subject, encoding = decode_header(subject_header)[0]
if encoding is None:
subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
else:
try:
subject = encoded_subject.decode(encoding)
except (UnicodeDecodeError, LookupError):
subject = "(unreadable subject)"
debug_info = {}
try:
if rcpt_to is not None:
to = rcpt_to
else:
to = find_emailgateway_recipient(message)
debug_info["to"] = to
if is_missed_message_address(to):
process_missed_message(to, message, pre_checked)
else:
process_stream_message(to, subject, message, debug_info)
except ZulipEmailForwardError as e:
# TODO: notify sender of error, retry if appropriate.
log_and_report(message, str(e), debug_info)
def mirror_email_message(data: Dict[str, str]) -> Dict[str, str]:
rcpt_to = data['recipient']
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Bad or expired missed message address."
}
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Please use the address specified in your Streams page."
}
queue_json_publish(
"email_mirror",
{
"message": data['msg_text'],
"rcpt_to": rcpt_to
}
)
return {"status": "success"}
| [
"str",
"str",
"message.Message",
"str",
"Dict[str, Any]",
"str",
"str",
"Optional[str]",
"str",
"UserProfile",
"Message",
"str",
"message.Message",
"Realm",
"str",
"message.Message",
"str",
"Stream",
"str",
"str",
"str",
"str",
"message.Message",
"str",
"message.Message",
"str",
"message.Message",
"Realm",
"str",
"message.Message",
"str",
"str",
"message.Message",
"Dict[str, Any]",
"str",
"message.Message",
"bool",
"message.Message",
"Dict[str, str]"
] | [
1074,
1416,
1767,
1799,
1816,
2505,
2596,
2754,
3063,
3503,
3525,
4645,
5117,
5141,
5504,
5518,
7434,
7447,
7462,
7476,
7730,
7742,
7970,
8001,
8430,
9186,
9586,
9610,
10759,
11165,
12057,
12071,
12085,
12141,
12499,
12513,
12543,
12708,
13839
] | [
1077,
1419,
1782,
1802,
1830,
2508,
2599,
2767,
3066,
3514,
3532,
4648,
5132,
5146,
5507,
5533,
7437,
7453,
7465,
7479,
7733,
7745,
7985,
8004,
8445,
9189,
9601,
9615,
10762,
11180,
12060,
12074,
12100,
12155,
12502,
12528,
12547,
12723,
13853
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/emoji.py |
import os
import re
import ujson
from django.conf import settings
from django.utils.translation import ugettext as _
from typing import Optional, Tuple
from zerver.lib.request import JsonableError
from zerver.lib.upload import upload_backend
from zerver.models import Reaction, Realm, RealmEmoji, UserProfile
EMOJI_PATH = os.path.join(settings.STATIC_ROOT, "generated", "emoji")
NAME_TO_CODEPOINT_PATH = os.path.join(EMOJI_PATH, "name_to_codepoint.json")
CODEPOINT_TO_NAME_PATH = os.path.join(EMOJI_PATH, "codepoint_to_name.json")
EMOTICON_CONVERSIONS_PATH = os.path.join(EMOJI_PATH, "emoticon_conversions.json")
with open(NAME_TO_CODEPOINT_PATH) as fp:
name_to_codepoint = ujson.load(fp)
with open(CODEPOINT_TO_NAME_PATH) as fp:
codepoint_to_name = ujson.load(fp)
with open(EMOTICON_CONVERSIONS_PATH) as fp:
EMOTICON_CONVERSIONS = ujson.load(fp)
possible_emoticons = EMOTICON_CONVERSIONS.keys()
possible_emoticon_regexes = map(re.escape, possible_emoticons) # type: ignore # AnyStr/str issues
terminal_symbols = ',.;?!()\\[\\] "\'\\n\\t' # type: str # from composebox_typeahead.js
emoticon_regex = ('(?<![^{0}])(?P<emoticon>('.format(terminal_symbols)
+ ')|('.join(possible_emoticon_regexes) # type: ignore # AnyStr/str issues
+ '))(?![^{0}])'.format(terminal_symbols))
# Translates emoticons to their colon syntax, e.g. `:smiley:`.
def translate_emoticons(text: str) -> str:
translated = text
for emoticon in EMOTICON_CONVERSIONS:
translated = re.sub(re.escape(emoticon), EMOTICON_CONVERSIONS[emoticon], translated)
return translated
def emoji_name_to_emoji_code(realm: Realm, emoji_name: str) -> Tuple[str, str]:
realm_emojis = realm.get_active_emoji()
realm_emoji = realm_emojis.get(emoji_name)
if realm_emoji is not None:
return str(realm_emojis[emoji_name]['id']), Reaction.REALM_EMOJI
if emoji_name == 'zulip':
return emoji_name, Reaction.ZULIP_EXTRA_EMOJI
if emoji_name in name_to_codepoint:
return name_to_codepoint[emoji_name], Reaction.UNICODE_EMOJI
raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))
def check_valid_emoji(realm: Realm, emoji_name: str) -> None:
emoji_name_to_emoji_code(realm, emoji_name)
def check_emoji_request(realm: Realm, emoji_name: str, emoji_code: str,
emoji_type: str) -> None:
# For a given realm and emoji type, checks whether an emoji
# code is valid for new reactions, or not.
if emoji_type == "realm_emoji":
realm_emojis = realm.get_emoji()
realm_emoji = realm_emojis.get(emoji_code)
if realm_emoji is None:
raise JsonableError(_("Invalid custom emoji."))
if realm_emoji["name"] != emoji_name:
raise JsonableError(_("Invalid custom emoji name."))
if realm_emoji["deactivated"]:
raise JsonableError(_("This custom emoji has been deactivated."))
elif emoji_type == "zulip_extra_emoji":
if emoji_code not in ["zulip"]:
raise JsonableError(_("Invalid emoji code."))
if emoji_name != emoji_code:
raise JsonableError(_("Invalid emoji name."))
elif emoji_type == "unicode_emoji":
if emoji_code not in codepoint_to_name:
raise JsonableError(_("Invalid emoji code."))
if name_to_codepoint.get(emoji_name) != emoji_code:
raise JsonableError(_("Invalid emoji name."))
else:
# The above are the only valid emoji types
raise JsonableError(_("Invalid emoji type."))
def check_emoji_admin(user_profile: UserProfile, emoji_name: Optional[str]=None) -> None:
"""Raises an exception if the user cannot administer the target realm
emoji name in their organization."""
# Realm administrators can always administer emoji
if user_profile.is_realm_admin:
return
if user_profile.realm.add_emoji_by_admins_only:
raise JsonableError(_("Must be an organization administrator"))
# Otherwise, normal users can add emoji
if emoji_name is None:
return
# Additionally, normal users can remove emoji they themselves added
emoji = RealmEmoji.objects.filter(realm=user_profile.realm,
name=emoji_name,
deactivated=False).first()
current_user_is_author = (emoji is not None and
emoji.author is not None and
emoji.author.id == user_profile.id)
if not user_profile.is_realm_admin and not current_user_is_author:
raise JsonableError(_("Must be an organization administrator or emoji author"))
def check_valid_emoji_name(emoji_name: str) -> None:
if re.match(r'^[0-9a-z.\-_]+(?<![.\-_])$', emoji_name):
return
raise JsonableError(_("Invalid characters in emoji name"))
def get_emoji_url(emoji_file_name: str, realm_id: int) -> str:
return upload_backend.get_emoji_url(emoji_file_name, realm_id)
def get_emoji_file_name(emoji_file_name: str, emoji_id: int) -> str:
_, image_ext = os.path.splitext(emoji_file_name)
return ''.join((str(emoji_id), image_ext))
| [
"str",
"Realm",
"str",
"Realm",
"str",
"Realm",
"str",
"str",
"str",
"UserProfile",
"str",
"str",
"int",
"str",
"int"
] | [
1424,
1655,
1674,
2190,
2209,
2303,
2322,
2339,
2380,
3606,
4726,
4914,
4929,
5052,
5067
] | [
1427,
1660,
1677,
2195,
2212,
2308,
2325,
2342,
2383,
3617,
4729,
4917,
4932,
5055,
5070
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/error_notify.py | # System documented in https://zulip.readthedocs.io/en/latest/subsystems/logging.html
import logging
from collections import defaultdict
from django.conf import settings
from django.core.mail import mail_admins
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from typing import cast, Any, Dict, Optional
from zerver.filters import clean_data_from_query_parameters
from zerver.models import get_system_bot
from zerver.lib.actions import internal_send_message
from zerver.lib.response import json_success, json_error
def format_subject(subject: str) -> str:
"""
Escape CR and LF characters.
"""
return subject.replace('\n', '\\n').replace('\r', '\\r')
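# Example (illustrative sketch): escaping keeps a multi-line error message on a
# single topic line:
#     format_subject("error\non two lines")  # -> "error\\non two lines"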
def logger_repr(report: Dict[str, Any]) -> str:
return ("Logger %(logger_name)s, from module %(log_module)s line %(log_lineno)d:"
% report)
def user_info_str(report: Dict[str, Any]) -> str:
if report['user_full_name'] and report['user_email']:
user_info = "%(user_full_name)s (%(user_email)s)" % (report)
else:
user_info = "Anonymous user (not logged in)"
user_info += " on %s deployment" % (report['deployment'],)
return user_info
def deployment_repr(report: Dict[str, Any]) -> str:
deployment = 'Deployed code:\n'
for (label, field) in [('git', 'git_described'),
('ZULIP_VERSION', 'zulip_version_const'),
('version', 'zulip_version_file'),
]:
if report[field] is not None:
deployment += '- %s: %s\n' % (label, report[field])
return deployment
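# Example (illustrative sketch): with report fields git_described="1.9.0-g1234"
# and zulip_version_const="1.9.0", and zulip_version_file=None (values here are
# invented), deployment_repr returns:
#     Deployed code:
#     - git: 1.9.0-g1234
#     - ZULIP_VERSION: 1.9.0
# Fields that are None are simply skipped.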
def notify_browser_error(report: Dict[str, Any]) -> None:
report = defaultdict(lambda: None, report)
if settings.ERROR_BOT:
zulip_browser_error(report)
email_browser_error(report)
def email_browser_error(report: Dict[str, Any]) -> None:
subject = "Browser error for %s" % (user_info_str(report))
body = ("User: %(user_full_name)s <%(user_email)s> on %(deployment)s\n\n"
"Message:\n%(message)s\n\nStacktrace:\n%(stacktrace)s\n\n"
"IP address: %(ip_address)s\n"
"User agent: %(user_agent)s\n"
"href: %(href)s\n"
"Server path: %(server_path)s\n"
"Deployed version: %(version)s\n"
% (report))
more_info = report['more_info']
if more_info is not None:
body += "\nAdditional information:"
for (key, value) in more_info.items():
body += "\n %s: %s" % (key, value)
body += "\n\nLog:\n%s" % (report['log'],)
mail_admins(subject, body)
def zulip_browser_error(report: Dict[str, Any]) -> None:
subject = "JS error: %s" % (report['user_email'],)
user_info = user_info_str(report)
body = "User: %s\n" % (user_info,)
body += ("Message: %(message)s\n"
% (report))
realm = get_system_bot(settings.ERROR_BOT).realm
internal_send_message(realm, settings.ERROR_BOT,
"stream", "errors", format_subject(subject), body)
def notify_server_error(report: Dict[str, Any], skip_error_zulip: Optional[bool]=False) -> None:
report = defaultdict(lambda: None, report)
email_server_error(report)
if settings.ERROR_BOT and not skip_error_zulip:
zulip_server_error(report)
def zulip_server_error(report: Dict[str, Any]) -> None:
subject = '%(node)s: %(message)s' % (report)
logger_str = logger_repr(report)
user_info = user_info_str(report)
deployment = deployment_repr(report)
if report['has_request']:
request_repr = (
"Request info:\n~~~~\n"
"- path: %(path)s\n"
"- %(method)s: %(data)s\n") % (report)
for field in ["REMOTE_ADDR", "QUERY_STRING", "SERVER_NAME"]:
val = report.get(field.lower())
if field == "QUERY_STRING":
val = clean_data_from_query_parameters(str(val))
request_repr += "- %s: \"%s\"\n" % (field, val)
request_repr += "~~~~"
else:
request_repr = "Request info: none"
message = ("%s\nError generated by %s\n\n~~~~ pytb\n%s\n\n~~~~\n%s\n%s"
% (logger_str, user_info, report['stack_trace'], deployment, request_repr))
realm = get_system_bot(settings.ERROR_BOT).realm
internal_send_message(realm, settings.ERROR_BOT, "stream", "errors",
format_subject(subject), message)
def email_server_error(report: Dict[str, Any]) -> None:
subject = '%(node)s: %(message)s' % (report)
logger_str = logger_repr(report)
user_info = user_info_str(report)
deployment = deployment_repr(report)
if report['has_request']:
request_repr = (
"Request info:\n"
"- path: %(path)s\n"
"- %(method)s: %(data)s\n") % (report)
for field in ["REMOTE_ADDR", "QUERY_STRING", "SERVER_NAME"]:
val = report.get(field.lower())
if field == "QUERY_STRING":
val = clean_data_from_query_parameters(str(val))
request_repr += "- %s: \"%s\"\n" % (field, val)
else:
request_repr = "Request info: none\n"
message = ("%s\nError generated by %s\n\n%s\n\n%s\n\n%s"
% (logger_str, user_info, report['stack_trace'], deployment, request_repr))
mail_admins(format_subject(subject), message, fail_silently=True)
def do_report_error(deployment_name: str, type: str, report: Dict[str, Any]) -> HttpResponse:
report['deployment'] = deployment_name
if type == 'browser':
notify_browser_error(report)
elif type == 'server':
notify_server_error(report)
else:
return json_error(_("Invalid type parameter"))
return json_success()
| [
"str",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"str",
"str",
"Dict[str, Any]"
] | [
587,
735,
894,
1223,
1655,
1855,
2643,
3082,
3344,
4461,
5418,
5429,
5442
] | [
590,
749,
908,
1237,
1669,
1869,
2657,
3096,
3358,
4475,
5421,
5432,
5456
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/events.py | # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import copy
import ujson
from collections import defaultdict
from django.utils.translation import ugettext as _
from django.conf import settings
from importlib import import_module
from typing import (
cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
)
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.alert_words import user_alert_words
from zerver.lib.attachments import user_attachments
from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.bot_config import load_bot_config_template
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.message import (
aggregate_unread_data,
apply_unread_message_event,
get_raw_unread_data,
get_starred_message_ids,
)
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.soft_deactivation import maybe_catch_up_soft_deactivated_user
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import JsonableError
from zerver.lib.topic import TOPIC_NAME
from zerver.lib.topic_mutes import get_topic_mutes
from zerver.lib.actions import (
validate_user_access_to_subscribers_helper,
do_get_streams, get_default_streams_for_realm,
gather_subscriptions_helper, get_cross_realm_dicts,
get_status_dict, streams_to_dicts_sorted,
default_stream_groups_to_dicts_sorted,
get_owned_bot_dicts,
)
from zerver.lib.user_groups import user_groups_in_realm_serialized
from zerver.tornado.event_queue import request_event_queue, get_user_events
from zerver.models import Client, Message, Realm, UserPresence, UserProfile, CustomProfileFieldValue, \
get_user_profile_by_id, \
    get_realm_user_dicts, realm_filters_for_realm, get_user, \
custom_profile_fields_for_realm, get_realm_domains, \
get_default_stream_groups, CustomProfileField, Stream
from zproject.backends import email_auth_enabled, password_auth_enabled
from version import ZULIP_VERSION
def get_raw_user_data(realm_id: int, client_gravatar: bool) -> Dict[int, Dict[str, str]]:
user_dicts = get_realm_user_dicts(realm_id)
# TODO: Consider optimizing this query away with caching.
custom_profile_field_values = CustomProfileFieldValue.objects.filter(user_profile__realm_id=realm_id)
profiles_by_user_id = defaultdict(dict) # type: Dict[int, Dict[str, Any]]
for profile_field in custom_profile_field_values:
user_id = profile_field.user_profile_id
profiles_by_user_id[user_id][profile_field.field_id] = profile_field.value
def user_data(row: Dict[str, Any]) -> Dict[str, Any]:
avatar_url = get_avatar_field(
user_id=row['id'],
            realm_id=realm_id,
email=row['email'],
avatar_source=row['avatar_source'],
avatar_version=row['avatar_version'],
medium=False,
client_gravatar=client_gravatar,
)
is_admin = row['is_realm_admin']
is_guest = row['is_guest']
is_bot = row['is_bot']
# This format should align with get_cross_realm_dicts() and notify_created_user
result = dict(
email=row['email'],
user_id=row['id'],
avatar_url=avatar_url,
is_admin=is_admin,
is_guest=is_guest,
is_bot=is_bot,
full_name=row['full_name'],
timezone=row['timezone'],
            is_active=row['is_active'],
            date_joined=row['date_joined'].isoformat(),
)
if not is_bot:
result['profile_data'] = profiles_by_user_id.get(row['id'], {})
return result
return {
row['id']: user_data(row)
for row in user_dicts
}
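# Example (illustrative sketch): the mapping returned above is keyed by user
# id, one dict per user (values invented for illustration):
#     {42: {'email': 'hamlet@zulip.com', 'user_id': 42, 'is_admin': False,
#           'is_bot': False, 'full_name': 'Hamlet', ...,
#           'profile_data': {1: 'a custom field value'}}}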
def always_want(msg_type: str) -> bool:
'''
This function is used as a helper in
fetch_initial_state_data, when the user passes
in None for event_types, and we want to fetch
info for every event type. Defining this at module
level makes it easier to mock.
'''
return True
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile: UserProfile,
event_types: Optional[Iterable[str]],
queue_id: str, client_gravatar: bool,
include_subscribers: bool = True) -> Dict[str, Any]:
state = {'queue_id': queue_id} # type: Dict[str, Any]
realm = user_profile.realm
if event_types is None:
# return True always
want = always_want # type: Callable[[str], bool]
else:
want = set(event_types).__contains__
if want('alert_words'):
state['alert_words'] = user_alert_words(user_profile)
if want('custom_profile_fields'):
fields = custom_profile_fields_for_realm(realm.id)
state['custom_profile_fields'] = [f.as_dict() for f in fields]
state['custom_profile_field_types'] = CustomProfileField.FIELD_TYPE_CHOICES_DICT
if want('hotspots'):
state['hotspots'] = get_next_hotspots(user_profile)
if want('message'):
# The client should use get_messages() to fetch messages
# starting with the max_message_id. They will get messages
# newer than that ID via get_events()
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
state['max_message_id'] = messages[0].id
else:
state['max_message_id'] = -1
if want('muted_topics'):
state['muted_topics'] = get_topic_mutes(user_profile)
if want('pointer'):
state['pointer'] = user_profile.pointer
if want('presence'):
state['presences'] = get_status_dict(user_profile)
if want('realm'):
for property_name in Realm.property_types:
state['realm_' + property_name] = getattr(realm, property_name)
# Most state is handled via the property_types framework;
# these manual entries are for those realm settings that don't
# fit into that framework.
state['realm_authentication_methods'] = realm.authentication_methods_dict()
state['realm_allow_message_editing'] = realm.allow_message_editing
state['realm_allow_community_topic_editing'] = realm.allow_community_topic_editing
state['realm_allow_message_deleting'] = realm.allow_message_deleting
state['realm_message_content_edit_limit_seconds'] = realm.message_content_edit_limit_seconds
state['realm_message_content_delete_limit_seconds'] = realm.message_content_delete_limit_seconds
state['realm_icon_url'] = realm_icon_url(realm)
state['realm_icon_source'] = realm.icon_source
state['max_icon_file_size'] = settings.MAX_ICON_FILE_SIZE
state['realm_bot_domain'] = realm.get_bot_domain()
state['realm_uri'] = realm.uri
state['realm_available_video_chat_providers'] = realm.VIDEO_CHAT_PROVIDERS
state['realm_presence_disabled'] = realm.presence_disabled
state['realm_digest_emails_enabled'] = realm.digest_emails_enabled and settings.SEND_DIGEST_EMAILS
state['realm_is_zephyr_mirror_realm'] = realm.is_zephyr_mirror_realm
state['realm_email_auth_enabled'] = email_auth_enabled(realm)
state['realm_password_auth_enabled'] = password_auth_enabled(realm)
state['realm_push_notifications_enabled'] = push_notifications_enabled()
if realm.notifications_stream and not realm.notifications_stream.deactivated:
notifications_stream = realm.notifications_stream
state['realm_notifications_stream_id'] = notifications_stream.id
else:
state['realm_notifications_stream_id'] = -1
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream:
state['realm_signup_notifications_stream_id'] = signup_notifications_stream.id
else:
state['realm_signup_notifications_stream_id'] = -1
if want('realm_domains'):
state['realm_domains'] = get_realm_domains(realm)
if want('realm_emoji'):
state['realm_emoji'] = realm.get_emoji()
if want('realm_filters'):
state['realm_filters'] = realm_filters_for_realm(realm.id)
if want('realm_user_groups'):
state['realm_user_groups'] = user_groups_in_realm_serialized(realm)
if want('realm_user'):
state['raw_users'] = get_raw_user_data(
realm_id=realm.id,
client_gravatar=client_gravatar,
)
# For the user's own avatar URL, we force
# client_gravatar=False, since that saves some unnecessary
        # client-side code for handling medium-size avatars. See #8253
# for details.
state['avatar_source'] = user_profile.avatar_source
state['avatar_url_medium'] = avatar_url(
user_profile,
medium=True,
client_gravatar=False,
)
state['avatar_url'] = avatar_url(
user_profile,
medium=False,
client_gravatar=False,
)
state['can_create_streams'] = user_profile.can_create_streams()
state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
state['cross_realm_bots'] = list(get_cross_realm_dicts())
state['is_admin'] = user_profile.is_realm_admin
state['is_guest'] = user_profile.is_guest
state['user_id'] = user_profile.id
state['enter_sends'] = user_profile.enter_sends
state['email'] = user_profile.email
state['delivery_email'] = user_profile.delivery_email
state['full_name'] = user_profile.full_name
if want('realm_bot'):
state['realm_bots'] = get_owned_bot_dicts(user_profile)
# This does not yet have an apply_event counterpart, since currently,
# new entries for EMBEDDED_BOTS can only be added directly in the codebase.
if want('realm_embedded_bots'):
realm_embedded_bots = []
for bot in EMBEDDED_BOTS:
realm_embedded_bots.append({'name': bot.name,
'config': load_bot_config_template(bot.name)})
state['realm_embedded_bots'] = realm_embedded_bots
if want('subscription'):
subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
state['subscriptions'] = subscriptions
state['unsubscribed'] = unsubscribed
state['never_subscribed'] = never_subscribed
if want('update_message_flags') and want('message'):
# Keeping unread_msgs updated requires both message flag updates and
# message updates. This is due to the fact that new messages will not
# generate a flag update so we need to use the flags field in the
# message event.
state['raw_unread_msgs'] = get_raw_unread_data(user_profile)
if want('starred_messages'):
state['starred_messages'] = get_starred_message_ids(user_profile)
if want('stream'):
state['streams'] = do_get_streams(user_profile)
state['stream_name_max_length'] = Stream.MAX_NAME_LENGTH
state['stream_description_max_length'] = Stream.MAX_DESCRIPTION_LENGTH
if want('default_streams'):
state['realm_default_streams'] = streams_to_dicts_sorted(
get_default_streams_for_realm(realm.id))
if want('default_stream_groups'):
state['realm_default_stream_groups'] = default_stream_groups_to_dicts_sorted(
get_default_stream_groups(realm))
if want('update_display_settings'):
for prop in UserProfile.property_types:
state[prop] = getattr(user_profile, prop)
state['emojiset_choices'] = user_profile.emojiset_choices()
if want('update_global_notifications'):
for notification in UserProfile.notification_setting_types:
state[notification] = getattr(user_profile, notification)
if want('zulip_version'):
state['zulip_version'] = ZULIP_VERSION
return state
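# Example (illustrative sketch): fetching only a couple of event types yields a
# correspondingly small state dict (`user` below is a hypothetical UserProfile):
#     state = fetch_initial_state_data(user, ['pointer', 'zulip_version'],
#                                      queue_id='', client_gravatar=False)
#     # -> {'queue_id': '', 'pointer': ..., 'zulip_version': ZULIP_VERSION}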
def remove_message_id_from_unread_mgs(state: Dict[str, Dict[str, Any]],
message_id: int) -> None:
raw_unread = state['raw_unread_msgs']
for key in ['pm_dict', 'stream_dict', 'huddle_dict']:
raw_unread[key].pop(message_id, None)
raw_unread['unmuted_stream_msgs'].discard(message_id)
raw_unread['mentions'].discard(message_id)
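# Example (illustrative sketch): 'raw_unread_msgs' (see get_raw_unread_data)
# holds per-category dicts keyed by message id plus two sets of ids, so each
# removal above is an O(1) pop/discard:
#     raw_unread = {'pm_dict': {17: ...}, 'stream_dict': {}, 'huddle_dict': {},
#                   'unmuted_stream_msgs': {17}, 'mentions': set()}
#     remove_message_id_from_unread_mgs({'raw_unread_msgs': raw_unread}, 17)
#     # 17 is now gone from both pm_dict and unmuted_stream_msgs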
def apply_events(state: Dict[str, Any], events: Iterable[Dict[str, Any]],
user_profile: UserProfile, client_gravatar: bool,
include_subscribers: bool = True,
fetch_event_types: Optional[Iterable[str]] = None) -> None:
for event in events:
if fetch_event_types is not None and event['type'] not in fetch_event_types:
# TODO: continuing here is not, most precisely, correct.
# In theory, an event of one type, e.g. `realm_user`,
# could modify state that doesn't come from that
# `fetch_event_types` value, e.g. the `our_person` part of
# that code path. But it should be extremely rare, and
# fixing that will require a nontrivial refactor of
# `apply_event`. For now, be careful in your choice of
# `fetch_event_types`.
continue
apply_event(state, event, user_profile, client_gravatar, include_subscribers)
def apply_event(state: Dict[str, Any],
event: Dict[str, Any],
user_profile: UserProfile,
client_gravatar: bool,
include_subscribers: bool) -> None:
if event['type'] == "message":
state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
if 'raw_unread_msgs' in state:
apply_unread_message_event(
user_profile,
state['raw_unread_msgs'],
event['message'],
event['flags'],
)
elif event['type'] == "hotspots":
state['hotspots'] = event['hotspots']
elif event['type'] == "custom_profile_fields":
state['custom_profile_fields'] = event['fields']
elif event['type'] == "pointer":
state['pointer'] = max(state['pointer'], event['pointer'])
elif event['type'] == "realm_user":
person = event['person']
person_user_id = person['user_id']
if event['op'] == "add":
person = copy.deepcopy(person)
if client_gravatar:
if 'gravatar.com' in person['avatar_url']:
person['avatar_url'] = None
person['is_active'] = True
if not person['is_bot']:
person['profile_data'] = {}
state['raw_users'][person_user_id] = person
elif event['op'] == "remove":
state['raw_users'][person_user_id]['is_active'] = False
elif event['op'] == 'update':
is_me = (person_user_id == user_profile.id)
if is_me:
if ('avatar_url' in person and 'avatar_url' in state):
state['avatar_source'] = person['avatar_source']
state['avatar_url'] = person['avatar_url']
state['avatar_url_medium'] = person['avatar_url_medium']
for field in ['is_admin', 'email', 'full_name']:
if field in person and field in state:
state[field] = person[field]
# In the unlikely event that the current user
# just changed to/from being an admin, we need
# to add/remove the data on all bots in the
# realm. This is ugly and probably better
# solved by removing the all-realm-bots data
# given to admin users from this flow.
if ('is_admin' in person and 'realm_bots' in state):
prev_state = state['raw_users'][user_profile.id]
was_admin = prev_state['is_admin']
now_admin = person['is_admin']
if was_admin and not now_admin:
state['realm_bots'] = []
if not was_admin and now_admin:
state['realm_bots'] = get_owned_bot_dicts(user_profile)
if client_gravatar and 'avatar_url' in person:
# Respect the client_gravatar setting in the `users` data.
if 'gravatar.com' in person['avatar_url']:
person['avatar_url'] = None
person['avatar_url_medium'] = None
if person_user_id in state['raw_users']:
p = state['raw_users'][person_user_id]
for field in p:
if field in person:
p[field] = person[field]
if 'custom_profile_field' in person:
custom_field_id = person['custom_profile_field']['id']
custom_field_new_value = person['custom_profile_field']['value']
p['profile_data'][custom_field_id] = custom_field_new_value
elif event['type'] == 'realm_bot':
if event['op'] == 'add':
state['realm_bots'].append(event['bot'])
if event['op'] == 'remove':
email = event['bot']['email']
for bot in state['realm_bots']:
if bot['email'] == email:
bot['is_active'] = False
if event['op'] == 'delete':
state['realm_bots'] = [item for item
in state['realm_bots'] if item['email'] != event['bot']['email']]
if event['op'] == 'update':
for bot in state['realm_bots']:
if bot['email'] == event['bot']['email']:
if 'owner_id' in event['bot']:
bot['owner'] = get_user_profile_by_id(event['bot']['owner_id']).email
else:
bot.update(event['bot'])
elif event['type'] == 'stream':
if event['op'] == 'create':
for stream in event['streams']:
if not stream['invite_only']:
stream_data = copy.deepcopy(stream)
if include_subscribers:
stream_data['subscribers'] = []
stream_data['stream_weekly_traffic'] = None
stream_data['is_old_stream'] = False
stream_data['is_announcement_only'] = False
# Add stream to never_subscribed (if not invite_only)
state['never_subscribed'].append(stream_data)
state['streams'].append(stream)
state['streams'].sort(key=lambda elt: elt["name"])
if event['op'] == 'delete':
deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
stream['stream_id'] not in deleted_stream_ids]
if event['op'] == 'update':
# For legacy reasons, we call stream data 'subscriptions' in
# the state var here, for the benefit of the JS code.
for obj in state['subscriptions']:
if obj['name'].lower() == event['name'].lower():
obj[event['property']] = event['value']
# Also update the pure streams data
for stream in state['streams']:
if stream['name'].lower() == event['name'].lower():
prop = event['property']
if prop in stream:
stream[prop] = event['value']
elif event['op'] == "occupy":
state['streams'] += event['streams']
elif event['op'] == "vacate":
stream_ids = [s["stream_id"] for s in event['streams']]
state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
elif event['type'] == 'default_streams':
state['realm_default_streams'] = event['default_streams']
elif event['type'] == 'default_stream_groups':
state['realm_default_stream_groups'] = event['default_stream_groups']
elif event['type'] == 'realm':
if event['op'] == "update":
field = 'realm_' + event['property']
state[field] = event['value']
# Tricky interaction: Whether we can create streams can get changed here.
if (field in ['realm_create_stream_by_admins_only',
'realm_waiting_period_threshold']) and 'can_create_streams' in state:
state['can_create_streams'] = user_profile.can_create_streams()
state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
elif event['op'] == "update_dict":
for key, value in event['data'].items():
state['realm_' + key] = value
# It's a bit messy, but this is where we need to
# update the state for whether password authentication
# is enabled on this server.
if key == 'authentication_methods':
state['realm_password_auth_enabled'] = (value['Email'] or value['LDAP'])
state['realm_email_auth_enabled'] = value['Email']
elif event['type'] == "subscription":
if not include_subscribers and event['op'] in ['peer_add', 'peer_remove']:
return
if event['op'] in ["add"]:
if not include_subscribers:
# Avoid letting 'subscribers' entries end up in the list
for i, sub in enumerate(event['subscriptions']):
event['subscriptions'][i] = copy.deepcopy(event['subscriptions'][i])
del event['subscriptions'][i]['subscribers']
def name(sub: Dict[str, Any]) -> str:
return sub['name'].lower()
if event['op'] == "add":
added_names = set(map(name, event["subscriptions"]))
was_added = lambda s: name(s) in added_names
# add the new subscriptions
state['subscriptions'] += event['subscriptions']
# remove them from unsubscribed if they had been there
state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]
# remove them from never_subscribed if they had been there
state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]
elif event['op'] == "remove":
removed_names = set(map(name, event["subscriptions"]))
was_removed = lambda s: name(s) in removed_names
# Find the subs we are affecting.
removed_subs = list(filter(was_removed, state['subscriptions']))
# Remove our user from the subscribers of the removed subscriptions.
if include_subscribers:
for sub in removed_subs:
sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]
# We must effectively copy the removed subscriptions from subscriptions to
# unsubscribe, since we only have the name in our data structure.
state['unsubscribed'] += removed_subs
# Now filter out the removed subscriptions from subscriptions.
state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]
elif event['op'] == 'update':
for sub in state['subscriptions']:
if sub['name'].lower() == event['name'].lower():
sub[event['property']] = event['value']
elif event['op'] == 'peer_add':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
for sub in state['never_subscribed']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
elif event['op'] == 'peer_remove':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id in sub['subscribers']):
sub['subscribers'].remove(user_id)
elif event['type'] == "presence":
# TODO: Add user_id to presence update events / state format!
presence_user_profile = get_user(event['email'], user_profile.realm)
state['presences'][event['email']] = UserPresence.get_status_dict_by_user(
presence_user_profile)[event['email']]
elif event['type'] == "update_message":
# We don't return messages in /register, so we don't need to
# do anything for content updates, but we may need to update
# the unread_msgs data if the topic of an unread message changed.
if TOPIC_NAME in event:
stream_dict = state['raw_unread_msgs']['stream_dict']
topic = event[TOPIC_NAME]
for message_id in event['message_ids']:
if message_id in stream_dict:
stream_dict[message_id]['topic'] = topic
elif event['type'] == "delete_message":
max_message = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id').first()
if max_message:
state['max_message_id'] = max_message.id
else:
state['max_message_id'] = -1
remove_id = event['message_id']
remove_message_id_from_unread_mgs(state, remove_id)
elif event['type'] == "reaction":
# The client will get the message with the reactions directly
pass
elif event['type'] == "submessage":
# The client will get submessages with their messages
pass
elif event['type'] == 'typing':
# Typing notification events are transient and thus ignored
pass
elif event['type'] == "attachment":
# Attachment events are just for updating the "uploads" UI;
# they are not sent directly.
pass
elif event['type'] == "update_message_flags":
# We don't return messages in `/register`, so most flags we
# can ignore, but we do need to update the unread_msgs data if
# unread state is changed.
if event['flag'] == 'read' and event['operation'] == 'add':
for remove_id in event['messages']:
remove_message_id_from_unread_mgs(state, remove_id)
if event['flag'] == 'starred' and event['operation'] == 'add':
state['starred_messages'] += event['messages']
if event['flag'] == 'starred' and event['operation'] == 'remove':
state['starred_messages'] = [message for message in state['starred_messages']
                                         if message not in event['messages']]
elif event['type'] == "realm_domains":
if event['op'] == 'add':
state['realm_domains'].append(event['realm_domain'])
elif event['op'] == 'change':
for realm_domain in state['realm_domains']:
if realm_domain['domain'] == event['realm_domain']['domain']:
realm_domain['allow_subdomains'] = event['realm_domain']['allow_subdomains']
elif event['op'] == 'remove':
state['realm_domains'] = [realm_domain for realm_domain in state['realm_domains']
if realm_domain['domain'] != event['domain']]
elif event['type'] == "realm_emoji":
state['realm_emoji'] = event['realm_emoji']
elif event['type'] == "alert_words":
state['alert_words'] = event['alert_words']
elif event['type'] == "muted_topics":
state['muted_topics'] = event["muted_topics"]
elif event['type'] == "realm_filters":
state['realm_filters'] = event["realm_filters"]
elif event['type'] == "update_display_settings":
assert event['setting_name'] in UserProfile.property_types
state[event['setting_name']] = event['setting']
elif event['type'] == "update_global_notifications":
assert event['notification_name'] in UserProfile.notification_setting_types
state[event['notification_name']] = event['setting']
elif event['type'] == "invites_changed":
pass
elif event['type'] == "user_group":
if event['op'] == 'add':
state['realm_user_groups'].append(event['group'])
state['realm_user_groups'].sort(key=lambda group: group['id'])
elif event['op'] == 'update':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
user_group.update(event['data'])
elif event['op'] == 'add_members':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
user_group['members'].extend(event['user_ids'])
user_group['members'].sort()
elif event['op'] == 'remove_members':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
members = set(user_group['members'])
user_group['members'] = list(members - set(event['user_ids']))
user_group['members'].sort()
elif event['op'] == 'remove':
state['realm_user_groups'] = [ug for ug in state['realm_user_groups']
if ug['id'] != event['group_id']]
else:
raise AssertionError("Unexpected event type %s" % (event['type'],))
def do_events_register(user_profile: UserProfile, user_client: Client,
apply_markdown: bool = True,
client_gravatar: bool = False,
event_types: Optional[Iterable[str]] = None,
queue_lifespan_secs: int = 0,
all_public_streams: bool = False,
include_subscribers: bool = True,
narrow: Iterable[Sequence[str]] = [],
fetch_event_types: Optional[Iterable[str]] = None) -> Dict[str, Any]:
# Technically we don't need to check this here because
# build_narrow_filter will check it, but it's nicer from an error
# handling perspective to do it before contacting Tornado
check_supported_events_narrow_filter(narrow)
# Note that we pass event_types, not fetch_event_types here, since
# that's what controls which future events are sent.
queue_id = request_event_queue(user_profile, user_client, apply_markdown, client_gravatar,
queue_lifespan_secs, event_types, all_public_streams,
narrow=narrow)
if queue_id is None:
raise JsonableError(_("Could not allocate event queue"))
if fetch_event_types is not None:
event_types_set = set(fetch_event_types) # type: Optional[Set[str]]
elif event_types is not None:
event_types_set = set(event_types)
else:
event_types_set = None
# Fill up the UserMessage rows if a soft-deactivated user has returned
maybe_catch_up_soft_deactivated_user(user_profile)
ret = fetch_initial_state_data(user_profile, event_types_set, queue_id,
client_gravatar=client_gravatar,
include_subscribers=include_subscribers)
# Apply events that came in while we were fetching initial data
events = get_user_events(user_profile, queue_id, -1)
apply_events(ret, events, user_profile, include_subscribers=include_subscribers,
client_gravatar=client_gravatar,
fetch_event_types=fetch_event_types)
post_process_state(ret)
if len(events) > 0:
ret['last_event_id'] = events[-1]['id']
else:
ret['last_event_id'] = -1
return ret
def post_process_state(ret: Dict[str, Any]) -> None:
'''
NOTE:
Below is an example of post-processing initial state data AFTER we
apply events. For large payloads like `unread_msgs`, it's helpful
to have an intermediate data structure that is easy to manipulate
with O(1)-type operations as we apply events.
Then, only at the end, we put it in the form that's more appropriate
    for clients.
'''
if 'raw_unread_msgs' in ret:
ret['unread_msgs'] = aggregate_unread_data(ret['raw_unread_msgs'])
del ret['raw_unread_msgs']
'''
See the note above; the same technique applies below.
'''
    if 'raw_users' in ret:
user_dicts = list(ret['raw_users'].values())
ret['realm_users'] = [d for d in user_dicts if d['is_active']]
ret['realm_non_active_users'] = [d for d in user_dicts if not d['is_active']]
'''
Be aware that we do intentional aliasing in the below code.
We can now safely remove the `is_active` field from all the
dicts that got partitioned into the two lists above.
We remove the field because it's already implied, and sending
it to clients makes clients prone to bugs where they "trust"
the field but don't actually update in live updates. It also
wastes bandwidth.
'''
for d in user_dicts:
d.pop('is_active')
del ret['raw_users']
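# Example (illustrative sketch): post-processing splits the raw user map into
# active/non-active lists and drops the now-implied 'is_active' flag:
#     ret = {'raw_users': {1: {'is_active': True, 'full_name': 'Iago'},
#                          2: {'is_active': False, 'full_name': 'Old Bot'}}}
#     post_process_state(ret)
#     # ret['realm_users'] == [{'full_name': 'Iago'}]
#     # ret['realm_non_active_users'] == [{'full_name': 'Old Bot'}]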
| [
"int",
"bool",
"Dict[str, Any]",
"str",
"UserProfile",
"Optional[Iterable[str]]",
"str",
"bool",
"Dict[str, Dict[str, Any]]",
"int",
"Dict[str, Any]",
"Iterable[Dict[str, Any]]",
"UserProfile",
"bool",
"Dict[str, Any]",
"Dict[str, Any]",
"UserProfile",
"bool",
"bool",
"Dict[str, Any]",
"UserProfile",
"Client",
"Dict[str, Any]"
] | [
2249,
2271,
2812,
3986,
4593,
4648,
4712,
4734,
12610,
12687,
12979,
13003,
13060,
13090,
13967,
14006,
14052,
14098,
14141,
22547,
30548,
30574,
32862
] | [
2252,
2275,
2826,
3989,
4604,
4671,
4715,
4738,
12635,
12690,
12993,
13027,
13071,
13094,
13981,
14020,
14063,
14102,
14145,
22561,
30559,
30580,
32876
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/exceptions.py | from enum import Enum
from typing import Any, Dict, List, Optional, Type
from mypy_extensions import NoReturn
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
class AbstractEnum(Enum):
'''An enumeration whose members are used strictly for their names.'''
def __new__(cls: Type['AbstractEnum']) -> 'AbstractEnum':
obj = object.__new__(cls)
obj._value_ = len(cls.__members__) + 1
return obj
# Override all the `Enum` methods that use `_value_`.
def __repr__(self) -> str:
return str(self) # nocoverage
def value(self) -> None:
raise AssertionError("Not implemented")
def __reduce_ex__(self, proto: int) -> NoReturn:
raise AssertionError("Not implemented")
class ErrorCode(AbstractEnum):
BAD_REQUEST = () # Generic name, from the name of HTTP 400.
REQUEST_VARIABLE_MISSING = ()
REQUEST_VARIABLE_INVALID = ()
INVALID_JSON = ()
BAD_IMAGE = ()
REALM_UPLOAD_QUOTA = ()
BAD_NARROW = ()
MISSING_HTTP_EVENT_HEADER = ()
STREAM_DOES_NOT_EXIST = ()
UNAUTHORIZED_PRINCIPAL = ()
UNEXPECTED_WEBHOOK_EVENT_TYPE = ()
BAD_EVENT_QUEUE_ID = ()
CSRF_FAILED = ()
INVITATION_FAILED = ()
INVALID_ZULIP_SERVER = ()
REQUEST_CONFUSING_VAR = ()
class JsonableError(Exception):
'''A standardized error format we can turn into a nice JSON HTTP response.
This class can be invoked in a couple ways.
* Easiest, but completely machine-unreadable:
raise JsonableError(_("No such widget: {}").format(widget_name))
The message may be passed through to clients and shown to a user,
so translation is required. Because the text will vary depending
on the user's language, it's not possible for code to distinguish
this error from others in a non-buggy way.
* Fully machine-readable, with an error code and structured data:
class NoSuchWidgetError(JsonableError):
code = ErrorCode.NO_SUCH_WIDGET
data_fields = ['widget_name']
def __init__(self, widget_name: str) -> None:
self.widget_name = widget_name # type: str
@staticmethod
def msg_format() -> str:
return _("No such widget: {widget_name}")
raise NoSuchWidgetError(widget_name)
Now both server and client code see a `widget_name` attribute
and an error code.
Subclasses may also override `http_status_code`.
'''
# Override this in subclasses, as needed.
code = ErrorCode.BAD_REQUEST # type: ErrorCode
# Override this in subclasses if providing structured data.
data_fields = [] # type: List[str]
# Optionally override this in subclasses to return a different HTTP status,
# like 403 or 404.
http_status_code = 400 # type: int
def __init__(self, msg: str) -> None:
# `_msg` is an implementation detail of `JsonableError` itself.
self._msg = msg # type: str
@staticmethod
def msg_format() -> str:
'''Override in subclasses. Gets the items in `data_fields` as format args.
This should return (a translation of) a string literal.
The reason it's not simply a class attribute is to allow
translation to work.
'''
# Secretly this gets one more format arg not in `data_fields`: `_msg`.
# That's for the sake of the `JsonableError` base logic itself, for
# the simplest form of use where we just get a plain message string
# at construction time.
return '{_msg}'
#
# Infrastructure -- not intended to be overridden in subclasses.
#
@property
def msg(self) -> str:
format_data = dict(((f, getattr(self, f)) for f in self.data_fields),
_msg=getattr(self, '_msg', None))
return self.msg_format().format(**format_data)
@property
def data(self) -> Dict[str, Any]:
return dict(((f, getattr(self, f)) for f in self.data_fields),
code=self.code.name)
def to_json(self) -> Dict[str, Any]:
d = {'result': 'error', 'msg': self.msg}
d.update(self.data)
return d
def __str__(self) -> str:
return self.msg
class StreamDoesNotExistError(JsonableError):
code = ErrorCode.STREAM_DOES_NOT_EXIST
data_fields = ['stream']
def __init__(self, stream: str) -> None:
self.stream = stream
@staticmethod
def msg_format() -> str:
return _("Stream '{stream}' does not exist")
class RateLimited(PermissionDenied):
def __init__(self, msg: str="") -> None:
super().__init__(msg)
class InvalidJSONError(JsonableError):
code = ErrorCode.INVALID_JSON
@staticmethod
def msg_format() -> str:
return _("Malformed JSON")
class BugdownRenderingException(Exception):
pass
| [
"Type['AbstractEnum']",
"int",
"str",
"str"
] | [
337,
722,
2912,
4449
] | [
357,
725,
2915,
4452
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/export.py | import datetime
from boto.s3.connection import S3Connection
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.forms.models import model_to_dict
from django.utils.timezone import make_aware as timezone_make_aware
from django.utils.timezone import utc as timezone_utc
from django.utils.timezone import is_naive as timezone_is_naive
import glob
import logging
import os
import ujson
import subprocess
import tempfile
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.models import UserProfile, Realm, Client, Huddle, Stream, \
UserMessage, Subscription, Message, RealmEmoji, RealmFilter, Reaction, \
RealmDomain, Recipient, DefaultStream, get_user_profile_by_id, \
UserPresence, UserActivity, UserActivityInterval, CustomProfileField, \
CustomProfileFieldValue, get_display_recipient, Attachment, get_system_bot, \
RealmAuditLog, UserHotspot, MutedTopic, Service, UserGroup, \
UserGroupMembership, BotStorageData, BotConfigData
from zerver.lib.parallel import run_parallel
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, \
Iterable, Union
# Custom mypy types follow:
Record = Dict[str, Any]
TableName = str
TableData = Dict[TableName, List[Record]]
Field = str
Path = str
Context = Dict[str, Any]
FilterArgs = Dict[str, Any]
IdSource = Tuple[TableName, Field]
SourceFilter = Callable[[Record], bool]
# These next two types are callbacks, which mypy does not
# support well, because PEP 484 says "using callbacks
# with keyword arguments is not perceived as a common use case."
# CustomFetch = Callable[[TableData, Config, Context], None]
# PostProcessData = Callable[[TableData, Config, Context], None]
CustomFetch = Any # TODO: make more specific, see above
PostProcessData = Any # TODO: make more specific
# The keys of our MessageOutput variables are normally
# List[Record], but when we write partials, we can get
# lists of integers or a single integer.
# TODO: This could maybe be improved using TypedDict?
MessageOutput = Dict[str, Union[List[Record], List[int], int]]
MESSAGE_BATCH_CHUNK_SIZE = 1000
realm_tables = [("zerver_defaultstream", DefaultStream, "defaultstream"),
("zerver_realmemoji", RealmEmoji, "realmemoji"),
("zerver_realmdomain", RealmDomain, "realmdomain"),
("zerver_realmfilter", RealmFilter, "realmfilter")] # List[Tuple[TableName, Any, str]]
ALL_ZULIP_TABLES = {
'analytics_anomaly',
'analytics_fillstate',
'analytics_installationcount',
'analytics_realmcount',
'analytics_streamcount',
'analytics_usercount',
'otp_static_staticdevice',
'otp_static_statictoken',
'otp_totp_totpdevice',
'social_auth_association',
'social_auth_code',
'social_auth_nonce',
'social_auth_partial',
'social_auth_usersocialauth',
'two_factor_phonedevice',
'zerver_archivedattachment',
'zerver_archivedattachment_messages',
'zerver_archivedmessage',
'zerver_archivedusermessage',
'zerver_attachment',
'zerver_attachment_messages',
'zerver_botconfigdata',
'zerver_botstoragedata',
'zerver_client',
'zerver_customprofilefield',
'zerver_customprofilefieldvalue',
'zerver_defaultstream',
'zerver_defaultstreamgroup',
'zerver_defaultstreamgroup_streams',
'zerver_emailchangestatus',
'zerver_huddle',
'zerver_message',
'zerver_multiuseinvite',
'zerver_multiuseinvite_streams',
'zerver_preregistrationuser',
'zerver_preregistrationuser_streams',
'zerver_pushdevicetoken',
'zerver_reaction',
'zerver_realm',
'zerver_realmauditlog',
'zerver_realmdomain',
'zerver_realmemoji',
'zerver_realmfilter',
'zerver_recipient',
'zerver_scheduledemail',
'zerver_scheduledmessage',
'zerver_service',
'zerver_stream',
'zerver_submessage',
'zerver_subscription',
'zerver_useractivity',
'zerver_useractivityinterval',
'zerver_usergroup',
'zerver_usergroupmembership',
'zerver_userhotspot',
'zerver_usermessage',
'zerver_userpresence',
'zerver_userprofile',
'zerver_userprofile_groups',
'zerver_userprofile_user_permissions',
'zerver_mutedtopic',
}
NON_EXPORTED_TABLES = {
# These invitation/confirmation flow tables don't make sense to
# export, since invitations links will be broken by the server URL
# change anyway:
'zerver_emailchangestatus',
'zerver_multiuseinvite',
'zerver_multiuseinvite_streams',
'zerver_preregistrationuser',
'zerver_preregistrationuser_streams',
# When switching servers, clients will need to re-login and
# reregister for push notifications anyway.
'zerver_pushdevicetoken',
# We don't use these generated Django tables
'zerver_userprofile_groups',
'zerver_userprofile_user_permissions',
    # These are used for scheduling future activity; they could make
    # sense to export, but are relatively low value.
'zerver_scheduledemail',
'zerver_scheduledmessage',
# These tables are related to a user's 2FA authentication
# configuration, which will need to be re-setup on the new server.
'two_factor_phonedevice',
'otp_static_staticdevice',
'otp_static_statictoken',
'otp_totp_totpdevice',
# These archive tables should not be exported (they are to support
# restoring content accidentally deleted due to software bugs in
# the retention policy feature)
'zerver_archivedmessage',
'zerver_archivedusermessage',
'zerver_archivedattachment',
'zerver_archivedattachment_messages',
# Social auth tables are not needed post-export, since we don't
# use any of this state outside of a direct authentication flow.
'social_auth_association',
'social_auth_code',
'social_auth_nonce',
'social_auth_partial',
'social_auth_usersocialauth',
# We will likely never want to migrate this table, since it's a
# total of all the realmcount values on the server. Might need to
# recompute it after a fillstate import.
'analytics_installationcount',
# These analytics tables, however, should ideally be in the export.
'analytics_realmcount',
'analytics_streamcount',
'analytics_usercount',
# Fillstate will require some cleverness to do the right partial export.
'analytics_fillstate',
# This table isn't yet used for anything.
'analytics_anomaly',
    # These are for unfinished features; we'll want to add them to the
# export before they reach full production status.
'zerver_defaultstreamgroup',
'zerver_defaultstreamgroup_streams',
'zerver_submessage',
# For any tables listed below here, it's a bug that they are not present in the export.
}
IMPLICIT_TABLES = {
# ManyToMany relationships are exported implicitly.
'zerver_attachment_messages',
}
ATTACHMENT_TABLES = {
'zerver_attachment',
}
MESSAGE_TABLES = {
# message tables get special treatment, because they're so big
'zerver_message',
'zerver_usermessage',
# zerver_reaction belongs here, since it's added late
'zerver_reaction',
}
DATE_FIELDS = {
'zerver_attachment': ['create_time'],
'zerver_message': ['last_edit_time', 'pub_date'],
'zerver_realm': ['date_created'],
'zerver_stream': ['date_created'],
'zerver_useractivity': ['last_visit'],
'zerver_useractivityinterval': ['start', 'end'],
'zerver_userpresence': ['timestamp'],
'zerver_userprofile': ['date_joined', 'last_login', 'last_reminder'],
'zerver_realmauditlog': ['event_time'],
'zerver_userhotspot': ['timestamp'],
} # type: Dict[TableName, List[Field]]
def sanity_check_output(data: TableData) -> None:
# First, we verify that the export tool has a declared
# configuration for every table.
target_models = (
list(apps.get_app_config('analytics').get_models(include_auto_created=True)) +
list(apps.get_app_config('django_otp').get_models(include_auto_created=True)) +
list(apps.get_app_config('otp_static').get_models(include_auto_created=True)) +
list(apps.get_app_config('otp_totp').get_models(include_auto_created=True)) +
list(apps.get_app_config('social_django').get_models(include_auto_created=True)) +
list(apps.get_app_config('two_factor').get_models(include_auto_created=True)) +
list(apps.get_app_config('zerver').get_models(include_auto_created=True))
)
all_tables_db = set(model._meta.db_table for model in target_models)
# These assertion statements will fire when we add a new database
# table that is not included in Zulip's data exports. Generally,
# you can add your new table to `ALL_ZULIP_TABLES` and
# `NON_EXPORTED_TABLES` during early work on a new feature so that
# CI passes.
#
# We'll want to make sure we handle it for exports before
# releasing the new feature, but doing so correctly requires some
# expertise on this export system.
assert ALL_ZULIP_TABLES == all_tables_db
assert NON_EXPORTED_TABLES.issubset(ALL_ZULIP_TABLES)
assert IMPLICIT_TABLES.issubset(ALL_ZULIP_TABLES)
assert ATTACHMENT_TABLES.issubset(ALL_ZULIP_TABLES)
tables = set(ALL_ZULIP_TABLES)
tables -= NON_EXPORTED_TABLES
tables -= IMPLICIT_TABLES
tables -= MESSAGE_TABLES
tables -= ATTACHMENT_TABLES
for table in tables:
if table not in data:
logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!' % (table,))
def write_data_to_file(output_file: Path, data: Any) -> None:
with open(output_file, "w") as f:
f.write(ujson.dumps(data, indent=4))
def make_raw(query: Any, exclude: Optional[List[Field]]=None) -> List[Record]:
'''
Takes a Django query and returns a JSONable list
of dictionaries corresponding to the database rows.
'''
rows = []
for instance in query:
data = model_to_dict(instance, exclude=exclude)
"""
In Django 1.11.5, model_to_dict evaluates the QuerySet of
many-to-many field to give us a list of instances. We require
a list of primary keys, so we get the primary keys from the
instances below.
"""
for field in instance._meta.many_to_many:
value = data[field.name]
data[field.name] = [row.id for row in value]
rows.append(data)
return rows
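# Example (illustrative sketch): make_raw turns a queryset into JSONable dicts;
# the loop above replaces each many-to-many field's instances with their
# primary keys, so an Attachment row would come out with
#     {..., 'messages': [3, 7], ...}   # hypothetical message ids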
def floatify_datetime_fields(data: TableData, table: TableName) -> None:
for item in data[table]:
for field in DATE_FIELDS[table]:
orig_dt = item[field]
if orig_dt is None:
continue
if timezone_is_naive(orig_dt):
logging.warning("Naive datetime:", item)
dt = timezone_make_aware(orig_dt)
else:
dt = orig_dt
utc_naive = dt.replace(tzinfo=None) - dt.utcoffset()
item[field] = (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
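# Example (illustrative sketch): an aware datetime becomes a float Unix
# timestamp, e.g. 2018-01-01 00:00:00 UTC -> 1514764800.0, which round-trips
# cleanly through JSON.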
class Config:
'''
    A Config object configures a single table for exporting (and,
    maybe some day, importing as well).
You should never mutate Config objects as part of the export;
instead use the data to determine how you populate other
data structures.
There are parent/children relationships between Config objects.
The parent should be instantiated first. The child will
append itself to the parent's list of children.
'''
def __init__(self, table: Optional[str]=None,
model: Optional[Any]=None,
normal_parent: Optional['Config']=None,
virtual_parent: Optional['Config']=None,
filter_args: Optional[FilterArgs]=None,
custom_fetch: Optional[CustomFetch]=None,
custom_tables: Optional[List[TableName]]=None,
post_process_data: Optional[PostProcessData]=None,
concat_and_destroy: Optional[List[TableName]]=None,
id_source: Optional[IdSource]=None,
source_filter: Optional[SourceFilter]=None,
parent_key: Optional[Field]=None,
use_all: bool=False,
is_seeded: bool=False,
exclude: Optional[List[Field]]=None) -> None:
assert table or custom_tables
self.table = table
self.model = model
self.normal_parent = normal_parent
self.virtual_parent = virtual_parent
self.filter_args = filter_args
self.parent_key = parent_key
self.use_all = use_all
self.is_seeded = is_seeded
self.exclude = exclude
self.custom_fetch = custom_fetch
self.custom_tables = custom_tables
self.post_process_data = post_process_data
self.concat_and_destroy = concat_and_destroy
self.id_source = id_source
self.source_filter = source_filter
self.children = [] # type: List[Config]
if normal_parent is not None:
self.parent = normal_parent # type: Optional[Config]
else:
self.parent = None
if virtual_parent is not None and normal_parent is not None:
raise AssertionError('''
If you specify a normal_parent, please
do not create a virtual_parent.
''')
if normal_parent is not None:
normal_parent.children.append(self)
elif virtual_parent is not None:
virtual_parent.children.append(self)
        elif not is_seeded:
raise AssertionError('''
You must specify a parent if you are
not using is_seeded.
''')
if self.id_source is not None:
if self.virtual_parent is None:
raise AssertionError('''
You must specify a virtual_parent if you are
using id_source.''')
if self.id_source[0] != self.virtual_parent.table:
raise AssertionError('''
Configuration error. To populate %s, you
want data from %s, but that differs from
the table name of your virtual parent (%s),
                which suggests you may not have set up
the ordering correctly. You may simply
need to assign a virtual_parent, or there
may be deeper issues going on.''' % (
self.table,
self.id_source[0],
self.virtual_parent.table))
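# Example (illustrative sketch): a minimal parent/child pair, mirroring the
# patterns in get_realm_config() below; the child registers itself with its
# parent and is fetched via parent_key once the parent's rows are known:
#     realm_config = Config(table='zerver_realm', is_seeded=True)
#     Config(table='zerver_defaultstream', model=DefaultStream,
#            normal_parent=realm_config, parent_key='realm_id__in')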
def export_from_config(response: TableData, config: Config, seed_object: Optional[Any]=None,
context: Optional[Context]=None) -> None:
table = config.table
parent = config.parent
model = config.model
if context is None:
context = {}
if table:
exported_tables = [table]
else:
if config.custom_tables is None:
raise AssertionError('''
You must specify config.custom_tables if you
are not specifying config.table''')
exported_tables = config.custom_tables
for t in exported_tables:
logging.info('Exporting via export_from_config: %s' % (t,))
rows = None
if config.is_seeded:
rows = [seed_object]
elif config.custom_fetch:
config.custom_fetch(
response=response,
config=config,
context=context
)
if config.custom_tables:
for t in config.custom_tables:
if t not in response:
raise AssertionError('Custom fetch failed to populate %s' % (t,))
elif config.concat_and_destroy:
# When we concat_and_destroy, we are working with
# temporary "tables" that are lists of records that
# should already be ready to export.
data = [] # type: List[Record]
for t in config.concat_and_destroy:
data += response[t]
del response[t]
logging.info('Deleted temporary %s' % (t,))
assert table is not None
response[table] = data
elif config.use_all:
assert model is not None
query = model.objects.all()
rows = list(query)
elif config.normal_parent:
# In this mode, our current model is figuratively Article,
# and normal_parent is figuratively Blog, and
# now we just need to get all the articles
# contained by the blogs.
model = config.model
assert parent is not None
assert parent.table is not None
assert config.parent_key is not None
parent_ids = [r['id'] for r in response[parent.table]]
filter_parms = {config.parent_key: parent_ids} # type: Dict[str, Any]
if config.filter_args is not None:
filter_parms.update(config.filter_args)
assert model is not None
query = model.objects.filter(**filter_parms)
rows = list(query)
elif config.id_source:
# In this mode, we are the figurative Blog, and we now
# need to look at the current response to get all the
# blog ids from the Article rows we fetched previously.
model = config.model
assert model is not None
# This will be a tuple of the form ('zerver_article', 'blog').
(child_table, field) = config.id_source
child_rows = response[child_table]
if config.source_filter:
child_rows = [r for r in child_rows if config.source_filter(r)]
lookup_ids = [r[field] for r in child_rows]
filter_parms = dict(id__in=lookup_ids)
if config.filter_args:
filter_parms.update(config.filter_args)
query = model.objects.filter(**filter_parms)
rows = list(query)
# Post-process rows (which won't apply to custom fetches/concats)
if rows is not None:
assert table is not None # Hint for mypy
response[table] = make_raw(rows, exclude=config.exclude)
if table in DATE_FIELDS:
floatify_datetime_fields(response, table)
if config.post_process_data:
config.post_process_data(
response=response,
config=config,
context=context
)
# Now walk our children. It's extremely important to respect
# the order of children here.
for child_config in config.children:
export_from_config(
response=response,
config=child_config,
context=context,
)
def get_realm_config() -> Config:
# This is common, public information about the realm that we can share
# with all realm users.
realm_config = Config(
table='zerver_realm',
is_seeded=True
)
Config(
table='zerver_defaultstream',
model=DefaultStream,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_customprofilefield',
model=CustomProfileField,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmemoji',
model=RealmEmoji,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmdomain',
model=RealmDomain,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmfilter',
model=RealmFilter,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_client',
model=Client,
virtual_parent=realm_config,
use_all=True
)
user_profile_config = Config(
custom_tables=[
'zerver_userprofile',
'zerver_userprofile_mirrordummy',
],
# set table for children who treat us as normal parent
table='zerver_userprofile',
virtual_parent=realm_config,
custom_fetch=fetch_user_profile,
)
user_groups_config = Config(
table='zerver_usergroup',
model=UserGroup,
normal_parent=realm_config,
parent_key='realm__in',
)
Config(
table='zerver_usergroupmembership',
model=UserGroupMembership,
normal_parent=user_groups_config,
parent_key='user_group__in',
)
Config(
custom_tables=[
'zerver_userprofile_crossrealm',
],
virtual_parent=user_profile_config,
custom_fetch=fetch_user_profile_cross_realm,
)
Config(
table='zerver_userpresence',
model=UserPresence,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_customprofilefieldvalue',
model=CustomProfileFieldValue,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_useractivity',
model=UserActivity,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_useractivityinterval',
model=UserActivityInterval,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_realmauditlog',
model=RealmAuditLog,
normal_parent=user_profile_config,
parent_key='modified_user__in',
)
Config(
table='zerver_userhotspot',
model=UserHotspot,
normal_parent=user_profile_config,
parent_key='user__in',
)
Config(
table='zerver_mutedtopic',
model=MutedTopic,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_service',
model=Service,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_botstoragedata',
model=BotStorageData,
normal_parent=user_profile_config,
parent_key='bot_profile__in',
)
Config(
table='zerver_botconfigdata',
model=BotConfigData,
normal_parent=user_profile_config,
parent_key='bot_profile__in',
)
# Some of these tables are intermediate "tables" that we
# create only for the export. Think of them as similar to views.
user_subscription_config = Config(
table='_user_subscription',
model=Subscription,
normal_parent=user_profile_config,
filter_args={'recipient__type': Recipient.PERSONAL},
parent_key='user_profile__in',
)
Config(
table='_user_recipient',
model=Recipient,
virtual_parent=user_subscription_config,
id_source=('_user_subscription', 'recipient'),
)
#
stream_subscription_config = Config(
table='_stream_subscription',
model=Subscription,
normal_parent=user_profile_config,
filter_args={'recipient__type': Recipient.STREAM},
parent_key='user_profile__in',
)
stream_recipient_config = Config(
table='_stream_recipient',
model=Recipient,
virtual_parent=stream_subscription_config,
id_source=('_stream_subscription', 'recipient'),
)
Config(
table='zerver_stream',
model=Stream,
virtual_parent=stream_recipient_config,
id_source=('_stream_recipient', 'type_id'),
source_filter=lambda r: r['type'] == Recipient.STREAM,
exclude=['email_token'],
post_process_data=sanity_check_stream_data
)
#
Config(
custom_tables=[
'_huddle_recipient',
'_huddle_subscription',
'zerver_huddle',
],
normal_parent=user_profile_config,
custom_fetch=fetch_huddle_objects,
)
# Now build permanent tables from our temp tables.
Config(
table='zerver_recipient',
virtual_parent=user_profile_config,
concat_and_destroy=[
'_user_recipient',
'_stream_recipient',
'_huddle_recipient',
],
)
Config(
table='zerver_subscription',
virtual_parent=user_profile_config,
concat_and_destroy=[
'_user_subscription',
'_stream_subscription',
'_huddle_subscription',
]
)
return realm_config
def sanity_check_stream_data(response: TableData, config: Config, context: Context) -> None:
if context['exportable_user_ids'] is not None:
# If we restrict which user ids are exportable,
        # the way that we find the set of streams is a little too
# complex to have a sanity check.
return
actual_streams = set([stream.name for stream in Stream.objects.filter(
realm=response["zerver_realm"][0]['id'])])
streams_in_response = set([stream['name'] for stream in response['zerver_stream']])
if len(streams_in_response - actual_streams) > 0:
print("Error: Streams not present in the realm were exported:")
print(" ", streams_in_response - actual_streams)
print("This is likely due to a bug in the export tool.")
raise AssertionError("Aborting! Please investigate.")
if len(actual_streams - streams_in_response) > 0:
print("Error: Some streams present in the realm were not exported:")
print(" ", actual_streams - streams_in_response)
print("Usually, this is caused by a stream having been created that never had subscribers.")
print("(Due to a bug elsewhere in Zulip, not in the export tool)")
raise AssertionError("Aborting! Please investigate.")
def fetch_user_profile(response: TableData, config: Config, context: Context) -> None:
realm = context['realm']
exportable_user_ids = context['exportable_user_ids']
query = UserProfile.objects.filter(realm_id=realm.id)
exclude = ['password', 'api_key']
rows = make_raw(list(query), exclude=exclude)
normal_rows = [] # type: List[Record]
dummy_rows = [] # type: List[Record]
for row in rows:
if exportable_user_ids is not None:
if row['id'] in exportable_user_ids:
assert not row['is_mirror_dummy']
else:
# Convert non-exportable users to
# inactive is_mirror_dummy users.
row['is_mirror_dummy'] = True
row['is_active'] = False
if row['is_mirror_dummy']:
dummy_rows.append(row)
else:
normal_rows.append(row)
response['zerver_userprofile'] = normal_rows
response['zerver_userprofile_mirrordummy'] = dummy_rows
def fetch_user_profile_cross_realm(response: TableData, config: Config, context: Context) -> None:
realm = context['realm']
response['zerver_userprofile_crossrealm'] = []
if realm.string_id == settings.SYSTEM_BOT_REALM:
return
for bot_user in [
get_system_bot(settings.NOTIFICATION_BOT),
get_system_bot(settings.EMAIL_GATEWAY_BOT),
get_system_bot(settings.WELCOME_BOT),
]:
recipient_id = Recipient.objects.get(type_id=bot_user.id, type=Recipient.PERSONAL).id
response['zerver_userprofile_crossrealm'].append(dict(
email=bot_user.email,
id=bot_user.id,
recipient_id=recipient_id,
))
def fetch_attachment_data(response: TableData, realm_id: int, message_ids: Set[int]) -> None:
filter_args = {'realm_id': realm_id}
query = Attachment.objects.filter(**filter_args)
response['zerver_attachment'] = make_raw(list(query))
floatify_datetime_fields(response, 'zerver_attachment')
'''
We usually export most messages for the realm, but not
quite ALL messages for the realm. So, we need to
clean up our attachment data to have correct
values for response['zerver_attachment'][<n>]['messages'].
'''
for row in response['zerver_attachment']:
        filtered_message_ids = set(row['messages']).intersection(message_ids)
        row['messages'] = sorted(list(filtered_message_ids))
'''
Attachments can be connected to multiple messages, although
it's most common to have just one message. Regardless,
if none of those message(s) survived the filtering above
for a particular attachment, then we won't export the
attachment row.
'''
response['zerver_attachment'] = [
row for row in response['zerver_attachment']
if row['messages']]
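# Worked example for the filtering above (ids hypothetical): an attachment
# row with messages=[1, 2, 9] exported alongside message_ids={2, 3, 9}
# ends up with messages=[2, 9]; had the intersection been empty, the
# list comprehension would have dropped the row entirely.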
def fetch_reaction_data(response: TableData, message_ids: Set[int]) -> None:
query = Reaction.objects.filter(message_id__in=list(message_ids))
response['zerver_reaction'] = make_raw(list(query))
def fetch_huddle_objects(response: TableData, config: Config, context: Context) -> None:
realm = context['realm']
assert config.parent is not None
assert config.parent.table is not None
user_profile_ids = set(r['id'] for r in response[config.parent.table])
# First we get all huddles involving someone in the realm.
realm_huddle_subs = Subscription.objects.select_related("recipient").filter(
recipient__type=Recipient.HUDDLE, user_profile__in=user_profile_ids)
realm_huddle_recipient_ids = set(sub.recipient_id for sub in realm_huddle_subs)
# Mark all Huddles whose recipient ID contains a cross-realm user.
unsafe_huddle_recipient_ids = set()
for sub in Subscription.objects.select_related().filter(recipient__in=realm_huddle_recipient_ids):
if sub.user_profile.realm != realm:
# In almost every case the other realm will be zulip.com
unsafe_huddle_recipient_ids.add(sub.recipient_id)
# Now filter down to just those huddles that are entirely within the realm.
#
# This is important for ensuring that the User objects needed
# to import it on the other end exist (since we're only
# exporting the users from this realm), at the cost of losing
# some of these cross-realm messages.
huddle_subs = [sub for sub in realm_huddle_subs if sub.recipient_id not in unsafe_huddle_recipient_ids]
huddle_recipient_ids = set(sub.recipient_id for sub in huddle_subs)
huddle_ids = set(sub.recipient.type_id for sub in huddle_subs)
huddle_subscription_dicts = make_raw(huddle_subs)
huddle_recipients = make_raw(Recipient.objects.filter(id__in=huddle_recipient_ids))
response['_huddle_recipient'] = huddle_recipients
response['_huddle_subscription'] = huddle_subscription_dicts
response['zerver_huddle'] = make_raw(Huddle.objects.filter(id__in=huddle_ids))
def fetch_usermessages(realm: Realm,
message_ids: Set[int],
user_profile_ids: Set[int],
message_filename: Path) -> List[Record]:
# UserMessage export security rule: You can export UserMessages
# for the messages you exported for the users in your realm.
user_message_query = UserMessage.objects.filter(user_profile__realm=realm,
message_id__in=message_ids)
user_message_chunk = []
for user_message in user_message_query:
if user_message.user_profile_id not in user_profile_ids:
continue
user_message_obj = model_to_dict(user_message)
user_message_obj['flags_mask'] = user_message.flags.mask
del user_message_obj['flags']
user_message_chunk.append(user_message_obj)
logging.info("Fetched UserMessages for %s" % (message_filename,))
return user_message_chunk
def export_usermessages_batch(input_path: Path, output_path: Path) -> None:
"""As part of the system for doing parallel exports, this runs on one
batch of Message objects and adds the corresponding UserMessage
objects. (This is called by the export_usermessage_batch
management command)."""
with open(input_path, "r") as input_file:
output = ujson.loads(input_file.read())
message_ids = [item['id'] for item in output['zerver_message']]
user_profile_ids = set(output['zerver_userprofile_ids'])
del output['zerver_userprofile_ids']
realm = Realm.objects.get(id=output['realm_id'])
del output['realm_id']
output['zerver_usermessage'] = fetch_usermessages(realm, set(message_ids), user_profile_ids, output_path)
write_message_export(output_path, output)
os.unlink(input_path)
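# Rough shape of a .partial input file consumed above (contents
# hypothetical; the keys are the ones this module writes):
#
#     {"realm_id": 1,
#      "zerver_userprofile_ids": [10, 11],
#      "zerver_message": [{"id": 42, ...}, ...]}
#
# The output file is the same dict with realm_id and
# zerver_userprofile_ids stripped and a zerver_usermessage list added.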
def write_message_export(message_filename: Path, output: MessageOutput) -> None:
write_data_to_file(output_file=message_filename, data=output)
logging.info("Dumped to %s" % (message_filename,))
def export_partial_message_files(realm: Realm,
response: TableData,
chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE,
output_dir: Optional[Path]=None) -> Set[int]:
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="zulip-export")
def get_ids(records: List[Record]) -> Set[int]:
return set(x['id'] for x in records)
# Basic security rule: You can export everything either...
# - sent by someone in your exportable_user_ids
# OR
# - received by someone in your exportable_user_ids (which
# equates to a recipient object we are exporting)
#
# TODO: In theory, you should be able to export messages in
# cross-realm PM threads; currently, this only exports cross-realm
# messages received by your realm that were sent by Zulip system
# bots (e.g. emailgateway, notification-bot).
# Here, "we" and "us" refers to the inner circle of users who
# were specified as being allowed to be exported. "Them"
# refers to other users.
user_ids_for_us = get_ids(
response['zerver_userprofile']
)
recipient_ids_for_us = get_ids(response['zerver_recipient'])
ids_of_our_possible_senders = get_ids(
response['zerver_userprofile'] +
response['zerver_userprofile_mirrordummy'] +
response['zerver_userprofile_crossrealm'])
ids_of_non_exported_possible_recipients = ids_of_our_possible_senders - user_ids_for_us
recipients_for_them = Recipient.objects.filter(
type=Recipient.PERSONAL,
type_id__in=ids_of_non_exported_possible_recipients).values("id")
recipient_ids_for_them = get_ids(recipients_for_them)
# We capture most messages here, since the
# recipients we subscribe to are also the
# recipients of most messages we send.
messages_we_received = Message.objects.filter(
sender__in=ids_of_our_possible_senders,
recipient__in=recipient_ids_for_us,
).order_by('id')
    # This should pick up stragglers: messages we sent
    # where the recipient wasn't subscribed to by any of
    # us (such as PMs to "them").
messages_we_sent_to_them = Message.objects.filter(
sender__in=user_ids_for_us,
recipient__in=recipient_ids_for_them,
).order_by('id')
message_queries = [
messages_we_received,
messages_we_sent_to_them
]
all_message_ids = set() # type: Set[int]
dump_file_id = 1
for message_query in message_queries:
dump_file_id = write_message_partial_for_query(
realm=realm,
message_query=message_query,
dump_file_id=dump_file_id,
all_message_ids=all_message_ids,
output_dir=output_dir,
user_profile_ids=user_ids_for_us,
chunk_size=chunk_size,
)
return all_message_ids
def write_message_partial_for_query(realm: Realm, message_query: Any, dump_file_id: int,
all_message_ids: Set[int], output_dir: Path,
user_profile_ids: Set[int],
chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> int:
min_id = -1
while True:
actual_query = message_query.filter(id__gt=min_id)[0:chunk_size]
message_chunk = make_raw(actual_query)
message_ids = set(m['id'] for m in message_chunk)
assert len(message_ids.intersection(all_message_ids)) == 0
all_message_ids.update(message_ids)
if len(message_chunk) == 0:
break
# Figure out the name of our shard file.
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
message_filename += '.partial'
logging.info("Fetched Messages for %s" % (message_filename,))
# Clean up our messages.
table_data = {} # type: TableData
table_data['zerver_message'] = message_chunk
floatify_datetime_fields(table_data, 'zerver_message')
# Build up our output for the .partial file, which needs
# a list of user_profile_ids to search for (as well as
# the realm id).
output = {} # type: MessageOutput
output['zerver_message'] = table_data['zerver_message']
output['zerver_userprofile_ids'] = list(user_profile_ids)
output['realm_id'] = realm.id
# And write the data.
write_message_export(message_filename, output)
min_id = max(message_ids)
dump_file_id += 1
return dump_file_id
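# Note: the id__gt=min_id loop above is keyset pagination: each pass
# fetches the next chunk_size rows strictly after the largest id already
# written, which avoids the cost of OFFSET-based paging on large tables
# and keeps shard files disjoint (hence the intersection assert).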
def export_uploads_and_avatars(realm: Realm, output_dir: Path) -> None:
uploads_output_dir = os.path.join(output_dir, 'uploads')
avatars_output_dir = os.path.join(output_dir, 'avatars')
emoji_output_dir = os.path.join(output_dir, 'emoji')
    for dir_path in (uploads_output_dir, avatars_output_dir, emoji_output_dir):
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
if settings.LOCAL_UPLOADS_DIR:
# Small installations and developers will usually just store files locally.
export_uploads_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "files"),
output_dir=uploads_output_dir)
export_avatars_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
output_dir=avatars_output_dir)
export_emoji_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
output_dir=emoji_output_dir)
else:
# Some bigger installations will have their data stored on S3.
export_files_from_s3(realm,
settings.S3_AVATAR_BUCKET,
output_dir=avatars_output_dir,
processing_avatars=True)
export_files_from_s3(realm,
settings.S3_AUTH_UPLOADS_BUCKET,
output_dir=uploads_output_dir)
export_files_from_s3(realm,
settings.S3_AVATAR_BUCKET,
output_dir=emoji_output_dir,
processing_emoji=True)
def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,
processing_avatars: bool=False,
processing_emoji: bool=False) -> None:
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
records = []
logging.info("Downloading uploaded files from %s" % (bucket_name))
avatar_hash_values = set()
user_ids = set()
if processing_avatars:
bucket_list = bucket.list()
for user_profile in UserProfile.objects.filter(realm=realm):
avatar_path = user_avatar_path_from_ids(user_profile.id, realm.id)
avatar_hash_values.add(avatar_path)
avatar_hash_values.add(avatar_path + ".original")
user_ids.add(user_profile.id)
if processing_emoji:
bucket_list = bucket.list(prefix="%s/emoji/images/" % (realm.id,))
else:
bucket_list = bucket.list(prefix="%s/" % (realm.id,))
if settings.EMAIL_GATEWAY_BOT is not None:
email_gateway_bot = get_system_bot(settings.EMAIL_GATEWAY_BOT) # type: Optional[UserProfile]
else:
email_gateway_bot = None
count = 0
for bkey in bucket_list:
if processing_avatars and bkey.name not in avatar_hash_values:
continue
key = bucket.get_key(bkey.name)
# This can happen if an email address has moved realms
if 'realm_id' in key.metadata and key.metadata['realm_id'] != str(realm.id):
if email_gateway_bot is None or key.metadata['user_profile_id'] != str(email_gateway_bot.id):
raise AssertionError("Key metadata problem: %s %s / %s" % (key.name, key.metadata, realm.id))
# Email gateway bot sends messages, potentially including attachments, cross-realm.
print("File uploaded by email gateway bot: %s / %s" % (key.name, key.metadata))
elif processing_avatars:
if 'user_profile_id' not in key.metadata:
raise AssertionError("Missing user_profile_id in key metadata: %s" % (key.metadata,))
if int(key.metadata['user_profile_id']) not in user_ids:
raise AssertionError("Wrong user_profile_id in key metadata: %s" % (key.metadata,))
elif 'realm_id' not in key.metadata:
raise AssertionError("Missing realm_id in key metadata: %s" % (key.metadata,))
record = dict(s3_path=key.name, bucket=bucket_name,
size=key.size, last_modified=key.last_modified,
content_type=key.content_type, md5=key.md5)
record.update(key.metadata)
if processing_emoji:
record['file_name'] = os.path.basename(key.name)
# A few early avatars don't have 'realm_id' on the object; fix their metadata
user_profile = get_user_profile_by_id(record['user_profile_id'])
if 'realm_id' not in record:
record['realm_id'] = user_profile.realm_id
record['user_profile_email'] = user_profile.email
# Fix the record ids
record['user_profile_id'] = int(record['user_profile_id'])
record['realm_id'] = int(record['realm_id'])
record['path'] = key.name
if processing_avatars or processing_emoji:
filename = os.path.join(output_dir, key.name)
else:
fields = key.name.split('/')
if len(fields) != 3:
raise AssertionError("Suspicious key with invalid format %s" % (key.name))
filename = os.path.join(output_dir, key.name)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
key.get_contents_to_filename(filename)
records.append(record)
count += 1
        if count % 100 == 0:
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_uploads_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:
count = 0
records = []
for attachment in Attachment.objects.filter(realm_id=realm.id):
local_path = os.path.join(local_dir, attachment.path_id)
output_path = os.path.join(output_dir, attachment.path_id)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
subprocess.check_call(["cp", "-a", local_path, output_path])
stat = os.stat(local_path)
record = dict(realm_id=attachment.realm_id,
user_profile_id=attachment.owner.id,
user_profile_email=attachment.owner.email,
s3_path=attachment.path_id,
path=attachment.path_id,
size=stat.st_size,
last_modified=stat.st_mtime,
content_type=None)
records.append(record)
count += 1
        if count % 100 == 0:
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_avatars_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:
count = 0
records = []
users = list(UserProfile.objects.filter(realm=realm))
users += [
get_system_bot(settings.NOTIFICATION_BOT),
get_system_bot(settings.EMAIL_GATEWAY_BOT),
get_system_bot(settings.WELCOME_BOT),
]
for user in users:
if user.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
continue
avatar_path = user_avatar_path_from_ids(user.id, realm.id)
wildcard = os.path.join(local_dir, avatar_path + '.*')
for local_path in glob.glob(wildcard):
logging.info('Copying avatar file for user %s from %s' % (
user.email, local_path))
fn = os.path.relpath(local_path, local_dir)
output_path = os.path.join(output_dir, fn)
os.makedirs(str(os.path.dirname(output_path)), exist_ok=True)
subprocess.check_call(["cp", "-a", str(local_path), str(output_path)])
stat = os.stat(local_path)
record = dict(realm_id=realm.id,
user_profile_id=user.id,
user_profile_email=user.email,
s3_path=fn,
path=fn,
size=stat.st_size,
last_modified=stat.st_mtime,
content_type=None)
records.append(record)
count += 1
            if count % 100 == 0:
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_emoji_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:
count = 0
records = []
for realm_emoji in RealmEmoji.objects.filter(realm_id=realm.id):
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=realm.id,
emoji_file_name=realm_emoji.file_name
)
local_path = os.path.join(local_dir, emoji_path)
output_path = os.path.join(output_dir, emoji_path)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
subprocess.check_call(["cp", "-a", local_path, output_path])
record = dict(realm_id=realm.id,
author=realm_emoji.author.id,
path=emoji_path,
s3_path=emoji_path,
file_name=realm_emoji.file_name,
name=realm_emoji.name,
deactivated=realm_emoji.deactivated)
records.append(record)
count += 1
        if count % 100 == 0:
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
stats_file = os.path.join(output_dir, 'stats.txt')
realm_file = os.path.join(output_dir, 'realm.json')
attachment_file = os.path.join(output_dir, 'attachment.json')
message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))
fns = sorted([attachment_file] + message_files + [realm_file])
logging.info('Writing stats file: %s\n' % (stats_file,))
with open(stats_file, 'w') as f:
for fn in fns:
f.write(os.path.basename(fn) + '\n')
            with open(fn) as payload_file:
                payload = payload_file.read()
data = ujson.loads(payload)
for k in sorted(data):
f.write('%5d %s\n' % (len(data[k]), k))
f.write('\n')
avatar_file = os.path.join(output_dir, 'avatars/records.json')
uploads_file = os.path.join(output_dir, 'uploads/records.json')
for fn in [avatar_file, uploads_file]:
f.write(fn+'\n')
            with open(fn) as payload_file:
                payload = payload_file.read()
data = ujson.loads(payload)
f.write('%5d records\n' % len(data))
f.write('\n')
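# The resulting stats.txt looks roughly like this (counts hypothetical):
#
#     attachment.json
#        17 zerver_attachment
#
#     messages-000001.json
#      1500 zerver_message
#     ...
#     <output_dir>/avatars/records.json
#        25 records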
def do_export_realm(realm: Realm, output_dir: Path, threads: int,
exportable_user_ids: Optional[Set[int]]=None) -> None:
response = {} # type: TableData
# We need at least one thread running to export
# UserMessage rows. The management command should
# enforce this for us.
if not settings.TEST_SUITE:
assert threads >= 1
assert os.path.exists("./manage.py")
realm_config = get_realm_config()
create_soft_link(source=output_dir, in_progress=True)
logging.info("Exporting data from get_realm_config()...")
export_from_config(
response=response,
config=realm_config,
seed_object=realm,
context=dict(realm=realm, exportable_user_ids=exportable_user_ids)
)
logging.info('...DONE with get_realm_config() data')
sanity_check_output(response)
logging.info("Exporting uploaded files and avatars")
export_uploads_and_avatars(realm, output_dir)
# We (sort of) export zerver_message rows here. We write
# them to .partial files that are subsequently fleshed out
# by parallel processes to add in zerver_usermessage data.
# This is for performance reasons, of course. Some installations
# have millions of messages.
logging.info("Exporting .partial files messages")
message_ids = export_partial_message_files(realm, response, output_dir=output_dir)
    logging.info('%d messages were exported' % (len(message_ids),))
# zerver_reaction
zerver_reaction = {} # type: TableData
fetch_reaction_data(response=zerver_reaction, message_ids=message_ids)
response.update(zerver_reaction)
# Write realm data
export_file = os.path.join(output_dir, "realm.json")
write_data_to_file(output_file=export_file, data=response)
logging.info('Writing realm data to %s' % (export_file,))
# zerver_attachment
export_attachment_table(realm=realm, output_dir=output_dir, message_ids=message_ids)
# Start parallel jobs to export the UserMessage objects.
launch_user_message_subprocesses(threads=threads, output_dir=output_dir)
logging.info("Finished exporting %s" % (realm.string_id))
create_soft_link(source=output_dir, in_progress=False)
def export_attachment_table(realm: Realm, output_dir: Path, message_ids: Set[int]) -> None:
response = {} # type: TableData
fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)
output_file = os.path.join(output_dir, "attachment.json")
logging.info('Writing attachment table data to %s' % (output_file,))
write_data_to_file(output_file=output_file, data=response)
def create_soft_link(source: Path, in_progress: bool=True) -> None:
is_done = not in_progress
in_progress_link = '/tmp/zulip-export-in-progress'
done_link = '/tmp/zulip-export-most-recent'
if in_progress:
new_target = in_progress_link
else:
subprocess.check_call(['rm', '-f', in_progress_link])
new_target = done_link
subprocess.check_call(["ln", "-nsf", source, new_target])
if is_done:
logging.info('See %s for output files' % (new_target,))
def launch_user_message_subprocesses(threads: int, output_dir: Path) -> None:
logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows' % (threads,))
def run_job(shard: str) -> int:
subprocess.call(["./manage.py", 'export_usermessage_batch', '--path',
str(output_dir), '--thread', shard])
return 0
for (status, job) in run_parallel(run_job,
[str(x) for x in range(0, threads)],
threads=threads):
print("Shard %s finished, status %s" % (job, status))
def do_export_user(user_profile: UserProfile, output_dir: Path) -> None:
response = {} # type: TableData
export_single_user(user_profile, response)
export_file = os.path.join(output_dir, "user.json")
write_data_to_file(output_file=export_file, data=response)
logging.info("Exporting messages")
export_messages_single_user(user_profile, output_dir)
def export_single_user(user_profile: UserProfile, response: TableData) -> None:
config = get_single_user_config()
export_from_config(
response=response,
config=config,
seed_object=user_profile,
)
def get_single_user_config() -> Config:
# zerver_userprofile
user_profile_config = Config(
table='zerver_userprofile',
is_seeded=True,
exclude=['password', 'api_key'],
)
# zerver_subscription
subscription_config = Config(
table='zerver_subscription',
model=Subscription,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
# zerver_recipient
recipient_config = Config(
table='zerver_recipient',
model=Recipient,
virtual_parent=subscription_config,
id_source=('zerver_subscription', 'recipient'),
)
# zerver_stream
Config(
table='zerver_stream',
model=Stream,
virtual_parent=recipient_config,
id_source=('zerver_recipient', 'type_id'),
source_filter=lambda r: r['type'] == Recipient.STREAM,
exclude=['email_token'],
)
return user_profile_config
def export_messages_single_user(user_profile: UserProfile, output_dir: Path,
chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> None:
user_message_query = UserMessage.objects.filter(user_profile=user_profile).order_by("id")
min_id = -1
dump_file_id = 1
while True:
actual_query = user_message_query.select_related(
"message", "message__sending_client").filter(id__gt=min_id)[0:chunk_size]
        user_message_chunk = list(actual_query)
user_message_ids = set(um.id for um in user_message_chunk)
if len(user_message_chunk) == 0:
break
message_chunk = []
for user_message in user_message_chunk:
item = model_to_dict(user_message.message)
item['flags'] = user_message.flags_list()
item['flags_mask'] = user_message.flags.mask
# Add a few nice, human-readable details
item['sending_client_name'] = user_message.message.sending_client.name
item['display_recipient'] = get_display_recipient(user_message.message.recipient)
message_chunk.append(item)
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
logging.info("Fetched Messages for %s" % (message_filename,))
output = {'zerver_message': message_chunk}
floatify_datetime_fields(output, 'zerver_message')
message_output = dict(output) # type: MessageOutput
write_message_export(message_filename, message_output)
min_id = max(user_message_ids)
dump_file_id += 1
| [
"TableData",
"Path",
"Any",
"Any",
"TableData",
"TableName",
"TableData",
"Config",
"TableData",
"Config",
"Context",
"TableData",
"Config",
"Context",
"TableData",
"Config",
"Context",
"TableData",
"int",
"Set[int]",
"TableData",
"Set[int]",
"TableData",
"Config",
"Context",
"Realm",
"Set[int]",
"Set[int]",
"Path",
"Path",
"Path",
"Path",
"MessageOutput",
"Realm",
"TableData",
"List[Record]",
"Realm",
"Any",
"int",
"Set[int]",
"Path",
"Set[int]",
"Realm",
"Path",
"Realm",
"str",
"Path",
"Realm",
"Path",
"Path",
"Realm",
"Path",
"Path",
"Realm",
"Path",
"Path",
"Path",
"Realm",
"Path",
"int",
"Realm",
"Path",
"Set[int]",
"Path",
"int",
"Path",
"str",
"UserProfile",
"Path",
"UserProfile",
"TableData",
"UserProfile",
"Path"
] | [
7721,
9559,
9571,
9689,
10447,
10465,
14617,
14636,
24451,
24470,
24487,
25720,
25739,
25756,
26740,
26759,
26776,
27440,
27461,
27479,
28570,
28594,
28775,
28794,
28811,
30655,
30698,
30749,
30800,
31626,
31645,
32461,
32475,
32661,
32711,
32990,
35595,
35617,
35636,
35694,
35716,
35776,
37269,
37288,
39043,
39063,
39080,
43055,
43073,
43091,
44215,
44233,
44251,
45924,
45942,
45960,
47124,
48226,
48245,
48260,
50459,
50478,
50497,
50870,
51395,
51412,
51546,
51991,
52016,
52370,
52393,
53567,
53592
] | [
7730,
9563,
9574,
9692,
10456,
10474,
14626,
14642,
24460,
24476,
24494,
25729,
25745,
25763,
26749,
26765,
26783,
27449,
27464,
27487,
28579,
28602,
28784,
28800,
28818,
30660,
30706,
30757,
30804,
31630,
31649,
32465,
32488,
32666,
32720,
33002,
35600,
35620,
35639,
35702,
35720,
35784,
37274,
37292,
39048,
39066,
39084,
43060,
43077,
43095,
44220,
44237,
44255,
45929,
45946,
45964,
47128,
48231,
48249,
48263,
50464,
50482,
50505,
50874,
51398,
51416,
51549,
52002,
52020,
52381,
52402,
53578,
53596
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/feedback.py |
from django.conf import settings
from django.core.mail import EmailMessage
from typing import Any, Mapping, Optional
from zerver.lib.actions import internal_send_message
from zerver.lib.send_email import FromAddress
from zerver.lib.redis_utils import get_redis_client
from zerver.models import get_realm, get_system_bot, \
UserProfile, Realm
import time
client = get_redis_client()
def has_enough_time_expired_since_last_message(sender_email: str, min_delay: float) -> bool:
# This function returns a boolean, but it also has the side effect
# of noting that a new message was received.
key = 'zilencer:feedback:%s' % (sender_email,)
t = int(time.time())
last_time = client.getset(key, t) # type: Optional[bytes]
if last_time is None:
return True
delay = t - int(last_time)
return delay > min_delay
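# Note that client.getset() writes the new timestamp in the same call
# that reads the old one, so merely checking the rate limit also records
# a message; e.g. has_enough_time_expired_since_last_message(addr, 180)
# can return True at most once per three minutes for a given sender.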
def get_ticket_number() -> int:
    num_file = '/var/tmp/.feedback-bot-ticket-number'
    try:
        with open(num_file) as f:
            ticket_number = int(f.read()) + 1
    except Exception:
        ticket_number = 1
    with open(num_file, 'w') as f:
        f.write('%d' % (ticket_number,))
    return ticket_number
def deliver_feedback_by_zulip(message: Mapping[str, Any]) -> None:
subject = "%s" % (message["sender_email"],)
if len(subject) > 60:
subject = subject[:57].rstrip() + "..."
content = ''
sender_email = message['sender_email']
# We generate ticket numbers if it's been more than a few minutes
# since their last message. This avoids some noise when people use
# enter-send.
need_ticket = has_enough_time_expired_since_last_message(sender_email, 180)
if need_ticket:
ticket_number = get_ticket_number()
content += '\n~~~'
content += '\nticket Z%03d (@support please ack)' % (ticket_number,)
content += '\nsender: %s' % (message['sender_full_name'],)
content += '\nemail: %s' % (sender_email,)
if 'sender_realm_str' in message:
content += '\nrealm: %s' % (message['sender_realm_str'],)
content += '\n~~~'
content += '\n\n'
content += message['content']
user_profile = get_system_bot(settings.FEEDBACK_BOT)
internal_send_message(user_profile.realm, settings.FEEDBACK_BOT,
"stream", settings.FEEDBACK_STREAM, subject, content)
def handle_feedback(event: Mapping[str, Any]) -> None:
if not settings.ENABLE_FEEDBACK:
return
if settings.FEEDBACK_EMAIL is not None:
to_email = settings.FEEDBACK_EMAIL
subject = "Zulip feedback from %s" % (event["sender_email"],)
content = event["content"]
from_email = '"%s" <%s>' % (event["sender_full_name"], FromAddress.SUPPORT)
headers = {'Reply-To': '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
if settings.FEEDBACK_STREAM is not None:
deliver_feedback_by_zulip(event)
| [
"str",
"float",
"Mapping[str, Any]",
"Mapping[str, Any]"
] | [
452,
468,
1168,
2343
] | [
455,
473,
1185,
2360
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/fix_unreads.py |
import time
import logging
from typing import Callable, List, TypeVar
from psycopg2.extensions import cursor
CursorObj = TypeVar('CursorObj', bound=cursor)
from django.db import connection
from zerver.models import UserProfile
'''
NOTE! Be careful modifying this library, as it is used
in a migration, and it needs to be valid for the state
of the database that is in place when the 0104_fix_unreads
migration runs.
'''
logger = logging.getLogger('zulip.fix_unreads')
logger.setLevel(logging.WARNING)
def build_topic_mute_checker(cursor: CursorObj, user_profile: UserProfile) -> Callable[[int, str], bool]:
'''
This function is similar to the function of the same name
in zerver/lib/topic_mutes.py, but it works without the ORM,
so that we can use it in migrations.
'''
query = '''
SELECT
recipient_id,
topic_name
FROM
zerver_mutedtopic
WHERE
user_profile_id = %s
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
tups = {
(recipient_id, topic_name.lower())
for (recipient_id, topic_name) in rows
}
def is_muted(recipient_id: int, topic: str) -> bool:
return (recipient_id, topic.lower()) in tups
return is_muted
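# Example with hypothetical data: if zerver_mutedtopic contains a row
# (recipient_id=7, topic_name='Denmark') for this user, then
# is_muted(7, 'denmark') is True; the lookup is case-insensitive because
# both sides are lowercased.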
def update_unread_flags(cursor: CursorObj, user_message_ids: List[int]) -> None:
um_id_list = ', '.join(str(id) for id in user_message_ids)
query = '''
UPDATE zerver_usermessage
SET flags = flags | 1
WHERE id IN (%s)
''' % (um_id_list,)
cursor.execute(query)
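# "flags | 1" sets the low-order bit of UserMessage.flags, which is the
# "read" flag, so the UPDATE above marks the given rows as read in bulk
# (note the callers filter on "(flags & 1) = 0" to find unread rows).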
def get_timing(message: str, f: Callable[[], None]) -> None:
start = time.time()
logger.info(message)
f()
elapsed = time.time() - start
logger.info('elapsed time: %.03f\n' % (elapsed,))
def fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:
recipient_ids = []
def find_recipients() -> None:
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
(NOT zerver_subscription.active)
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'get recipients',
find_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find() -> None:
recips = ', '.join(str(id) for id in recipient_ids)
query = '''
SELECT
zerver_usermessage.id
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
user_message_ids.append(row[0])
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding unread messages for non-active streams',
find
)
if not user_message_ids:
return
def fix() -> None:
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for non-active streams',
fix
)
def fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:
pointer = user_profile.pointer
if not pointer:
return
recipient_ids = []
def find_non_muted_recipients() -> None:
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
zerver_subscription.in_home_view AND
zerver_subscription.active
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'find_non_muted_recipients',
find_non_muted_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find_old_ids() -> None:
recips = ', '.join(str(id) for id in recipient_ids)
is_topic_muted = build_topic_mute_checker(cursor, user_profile)
query = '''
SELECT
zerver_usermessage.id,
zerver_message.recipient_id,
zerver_message.subject
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
zerver_usermessage.message_id <= %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, pointer, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for (um_id, recipient_id, topic) in rows:
if not is_topic_muted(recipient_id, topic):
user_message_ids.append(um_id)
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding pre-pointer messages that are not muted',
find_old_ids
)
if not user_message_ids:
return
def fix() -> None:
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for pre-pointer non-muted messages',
fix
)
def fix(user_profile: UserProfile) -> None:
logger.info('\n---\nFixing %s:' % (user_profile.email,))
with connection.cursor() as cursor:
fix_unsubscribed(cursor, user_profile)
fix_pre_pointer(cursor, user_profile)
| [
"CursorObj",
"UserProfile",
"int",
"str",
"CursorObj",
"List[int]",
"str",
"Callable[[], None]",
"CursorObj",
"UserProfile",
"CursorObj",
"UserProfile",
"UserProfile"
] | [
546,
571,
1192,
1204,
1325,
1354,
1619,
1627,
1832,
1857,
3939,
3964,
6526
] | [
555,
582,
1195,
1207,
1334,
1363,
1622,
1645,
1841,
1868,
3948,
3975,
6537
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/generate_test_data.py | import itertools
import ujson
import random
from typing import List, Dict, Any, Optional
def load_config() -> Dict[str, Any]:
with open("zerver/tests/fixtures/config.generate_data.json", "r") as infile:
config = ujson.load(infile)
return config
def get_stream_title(gens: Dict[str, Any]) -> str:
return next(gens["adjectives"]) + " " + next(gens["nouns"]) + " " + \
next(gens["connectors"]) + " " + next(gens["verbs"]) + " " + \
next(gens["adverbs"])
def load_generators(config: Dict[str, Any]) -> Dict[str, Any]:
results = {}
cfg = config["gen_fodder"]
results["nouns"] = itertools.cycle(cfg["nouns"])
results["adjectives"] = itertools.cycle(cfg["adjectives"])
results["connectors"] = itertools.cycle(cfg["connectors"])
results["verbs"] = itertools.cycle(cfg["verbs"])
results["adverbs"] = itertools.cycle(cfg["adverbs"])
results["emojis"] = itertools.cycle(cfg["emoji"])
results["links"] = itertools.cycle(cfg["links"])
results["maths"] = itertools.cycle(cfg["maths"])
results["inline-code"] = itertools.cycle(cfg["inline-code"])
results["code-blocks"] = itertools.cycle(cfg["code-blocks"])
results["quote-blocks"] = itertools.cycle(cfg["quote-blocks"])
results["lists"] = itertools.cycle(cfg["lists"])
return results
def parse_file(config: Dict[str, Any], gens: Dict[str, Any], corpus_file: str) -> List[str]:
# First, load the entire file into a dictionary,
# then apply our custom filters to it as needed.
paragraphs = [] # type: List[str]
with open(corpus_file, "r") as infile:
# OUR DATA: we need to separate the person talking and what they say
paragraphs = remove_line_breaks(infile)
paragraphs = add_flair(paragraphs, gens)
return paragraphs
def get_flair_gen(length: int) -> List[str]:
# Grab the percentages from the config file
# create a list that we can consume that will guarantee the distribution
result = []
for k, v in config["dist_percentages"].items():
result.extend([k] * int(v * length / 100))
result.extend(["None"] * (length - len(result)))
random.shuffle(result)
return result
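# Example (assuming dist_percentages == {'bold': 50, 'italic': 50} and
# length == 4): the result is a shuffled list such as
# ['italic', 'bold', 'bold', 'italic'], one flair key per paragraph.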
def add_flair(paragraphs: List[str], gens: Dict[str, Any]) -> List[str]:
# roll the dice and see what kind of flair we should add, if any
results = []
flair = get_flair_gen(len(paragraphs))
for i in range(len(paragraphs)):
key = flair[i]
if key == "None":
txt = paragraphs[i]
elif key == "italic":
txt = add_md("*", paragraphs[i])
elif key == "bold":
txt = add_md("**", paragraphs[i])
elif key == "strike-thru":
txt = add_md("~~", paragraphs[i])
elif key == "quoted":
txt = ">" + paragraphs[i]
elif key == "quote-block":
txt = paragraphs[i] + "\n" + next(gens["quote-blocks"])
elif key == "inline-code":
txt = paragraphs[i] + "\n" + next(gens["inline-code"])
elif key == "code-block":
txt = paragraphs[i] + "\n" + next(gens["code-blocks"])
elif key == "math":
txt = paragraphs[i] + "\n" + next(gens["maths"])
elif key == "list":
txt = paragraphs[i] + "\n" + next(gens["lists"])
elif key == "emoji":
txt = add_emoji(paragraphs[i], next(gens["emojis"]))
elif key == "link":
txt = add_link(paragraphs[i], next(gens["links"]))
elif key == "picture":
            txt = paragraphs[i]  # TODO: implement pictures; keep the plain text for now
results.append(txt)
return results
def add_md(mode: str, text: str) -> str:
# mode means: bold, italic, etc.
    # to add a list at the end of a paragraph, * item one\n * item two
# find out how long the line is, then insert the mode before the end
vals = text.split()
start = random.randrange(len(vals))
end = random.randrange(len(vals) - start) + start
vals[start] = mode + vals[start]
vals[end] = vals[end] + mode
return " ".join(vals).strip()
def add_emoji(text: str, emoji: str) -> str:
vals = text.split()
start = random.randrange(len(vals))
vals[start] = vals[start] + " " + emoji + " "
return " ".join(vals)
def add_link(text: str, link: str) -> str:
vals = text.split()
start = random.randrange(len(vals))
vals[start] = vals[start] + " " + link + " "
return " ".join(vals)
def remove_line_breaks(fh: Any) -> List[str]:
# We're going to remove line breaks from paragraphs
results = [] # save the dialogs as tuples with (author, dialog)
para = [] # we'll store the lines here to form a paragraph
for line in fh:
text = line.strip()
if text != "":
para.append(text)
else:
if para:
results.append(" ".join(para))
# reset the paragraph
para = []
if para:
results.append(" ".join(para))
return results
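# e.g. a file whose lines are "a", "b", "", "c" collapses to the
# paragraphs ["a b", "c"].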
def write_file(paragraphs: List[str], filename: str) -> None:
with open(filename, "w") as outfile:
outfile.write(ujson.dumps(paragraphs))
def create_test_data() -> None:
gens = load_generators(config) # returns a dictionary of generators
paragraphs = parse_file(config, gens, config["corpus"]["filename"])
write_file(paragraphs, "var/test_messages.json")
config = load_config() # type: Dict[str, Any]
if __name__ == "__main__":
    create_test_data()
| [
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"Dict[str, Any]",
"str",
"int",
"List[str]",
"Dict[str, Any]",
"str",
"str",
"str",
"str",
"str",
"str",
"Any",
"List[str]",
"str"
] | [
291,
520,
1350,
1372,
1401,
1835,
2227,
2244,
3643,
3654,
4096,
4108,
4283,
4294,
4477,
5030,
5051
] | [
305,
534,
1364,
1386,
1404,
1838,
2236,
2258,
3646,
3657,
4099,
4111,
4286,
4297,
4480,
5039,
5054
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/hotspots.py | from django.conf import settings
from django.utils.translation import ugettext as _
from zerver.models import UserProfile, UserHotspot
from typing import List, Dict
ALL_HOTSPOTS = {
'intro_reply': {
'title': _('Reply to a message'),
'description': _('Click anywhere on a message to reply.'),
},
'intro_streams': {
'title': _('Catch up on a stream'),
'description': _('Messages sent to a stream are seen by everyone subscribed '
'to that stream. Try clicking on one of the stream links below.'),
},
'intro_topics': {
'title': _('Topics'),
'description': _('Every message has a topic. Topics keep conversations '
'easy to follow, and make it easy to reply to conversations that start '
'while you are offline.'),
},
'intro_compose': {
'title': _('Compose'),
'description': _('Click here to start a new conversation. Pick a topic '
'(2-3 words is best), and give it a go!'),
},
} # type: Dict[str, Dict[str, str]]
def get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:
# For manual testing, it can be convenient to set
# ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
# make it easy to click on all of the hotspots.
if settings.ALWAYS_SEND_ALL_HOTSPOTS:
return [{
'name': hotspot,
'title': ALL_HOTSPOTS[hotspot]['title'],
'description': ALL_HOTSPOTS[hotspot]['description'],
'delay': 0,
} for hotspot in ALL_HOTSPOTS]
if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
return []
seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))
for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_compose']:
if hotspot not in seen_hotspots:
return [{
'name': hotspot,
'title': ALL_HOTSPOTS[hotspot]['title'],
'description': ALL_HOTSPOTS[hotspot]['description'],
'delay': 0.5,
}]
user.tutorial_status = UserProfile.TUTORIAL_FINISHED
user.save(update_fields=['tutorial_status'])
return []
def copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:
for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,
timestamp=userhotspot.timestamp)
target_profile.tutorial_status = source_profile.tutorial_status
target_profile.onboarding_steps = source_profile.onboarding_steps
target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])
| [
"UserProfile",
"UserProfile",
"UserProfile"
] | [
1140,
2319,
2348
] | [
1151,
2330,
2359
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/html_diff.py | import lxml
from lxml.html.diff import htmldiff
from typing import Optional
def highlight_with_class(text: str, klass: str) -> str:
return '<span class="%s">%s</span>' % (klass, text)
def highlight_html_differences(s1: str, s2: str, msg_id: Optional[int]=None) -> str:
retval = htmldiff(s1, s2)
fragment = lxml.html.fromstring(retval)
for elem in fragment.cssselect('del'):
elem.tag = 'span'
elem.set('class', 'highlight_text_deleted')
for elem in fragment.cssselect('ins'):
elem.tag = 'span'
elem.set('class', 'highlight_text_inserted')
    retval = lxml.html.tostring(fragment, encoding="unicode")
return retval
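# e.g. highlight_html_differences('<p>hi</p>', '<p>hi there</p>') wraps
# the added words in <span class="highlight_text_inserted">...</span>,
# since htmldiff marks them with <ins> and we rewrite those tags above.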
| [
"str",
"str",
"str",
"str"
] | [
109,
121,
226,
235
] | [
112,
124,
229,
238
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/i18n.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.lru_cache import lru_cache
from itertools import zip_longest
from typing import Any, List, Dict, Optional
import os
import ujson
def with_language(string: str, language: str) -> str:
"""
This is an expensive function. If you are using it in a loop, it will
make your code slow.
"""
old_language = translation.get_language()
translation.activate(language)
result = _(string)
translation.activate(old_language)
return result
@lru_cache()
def get_language_list() -> List[Dict[str, Any]]:
path = os.path.join(settings.STATIC_ROOT, 'locale', 'language_name_map.json')
with open(path, 'r') as reader:
languages = ujson.load(reader)
return languages['name_map']
def get_language_list_for_templates(default_language: str) -> List[Dict[str, Dict[str, str]]]:
language_list = [l for l in get_language_list()
if 'percent_translated' not in l or
l['percent_translated'] >= 5.]
formatted_list = []
lang_len = len(language_list)
    firsts_end = (lang_len // 2) + (lang_len % 2)
firsts = list(range(0, firsts_end))
seconds = list(range(firsts_end, lang_len))
assert len(firsts) + len(seconds) == lang_len
for row in zip_longest(firsts, seconds):
item = {}
for position, ind in zip(['first', 'second'], row):
if ind is None:
continue
lang = language_list[ind]
percent = name = lang['name']
if 'percent_translated' in lang:
percent = "{} ({}%)".format(name, lang['percent_translated'])
selected = False
if default_language in (lang['code'], lang['locale']):
selected = True
item[position] = {
'name': name,
'code': lang['code'],
'percent': percent,
'selected': selected
}
formatted_list.append(item)
return formatted_list
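# Layout note: with five languages the zip_longest above yields the index
# pairs (0, 3), (1, 4), (2, None), i.e. indices 0..2 fill the 'first'
# column and 3..4 the 'second', leaving the last row half empty.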
def get_language_name(code: str) -> Optional[str]:
for lang in get_language_list():
if code in (lang['code'], lang['locale']):
return lang['name']
return None
def get_available_language_codes() -> List[str]:
language_list = get_language_list()
codes = [language['code'] for language in language_list]
return codes
def get_language_translation_data(language: str) -> Dict[str, str]:
if language == 'zh-hans':
language = 'zh_Hans'
elif language == 'zh-hant':
language = 'zh_Hant'
elif language == 'id-id':
language = 'id_ID'
path = os.path.join(settings.STATIC_ROOT, 'locale', language, 'translations.json')
try:
with open(path, 'r') as reader:
return ujson.load(reader)
except FileNotFoundError:
print('Translation for {} not found at {}'.format(language, path))
return {}
| [
"str",
"str",
"str",
"str",
"str"
] | [
338,
353,
954,
2206,
2578
] | [
341,
356,
957,
2209,
2581
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/import_realm.py | import datetime
import logging
import os
import ujson
import shutil
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from django.conf import settings
from django.db import connection
from django.db.models import Max
from django.utils.timezone import utc as timezone_utc, now as timezone_now
from typing import Any, Dict, List, Optional, Set, Tuple, \
Iterable, cast
from zerver.lib.actions import UserMessageLite, bulk_insert_ums
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.export import DATE_FIELDS, realm_tables, \
Record, TableData, TableName, Field, Path
from zerver.lib.message import do_render_markdown, RealmAlertWords
from zerver.lib.bugdown import version as bugdown_version
from zerver.lib.upload import random_name, sanitize_name, \
S3UploadBackend, LocalUploadBackend, guess_type
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.models import UserProfile, Realm, Client, Huddle, Stream, \
UserMessage, Subscription, Message, RealmEmoji, \
RealmDomain, Recipient, get_user_profile_by_id, \
UserPresence, UserActivity, UserActivityInterval, Reaction, \
CustomProfileField, CustomProfileFieldValue, RealmAuditLog, \
Attachment, get_system_bot, email_to_username, get_huddle_hash, \
UserHotspot, MutedTopic, Service, UserGroup, UserGroupMembership, \
BotStorageData, BotConfigData
# Code from here is the realm import code path
# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP = {
'client': {},
'user_profile': {},
'huddle': {},
'realm': {},
'stream': {},
'recipient': {},
'subscription': {},
'defaultstream': {},
'reaction': {},
'realmemoji': {},
'realmdomain': {},
'realmfilter': {},
'message': {},
'user_presence': {},
'useractivity': {},
'useractivityinterval': {},
'usermessage': {},
'customprofilefield': {},
'customprofilefieldvalue': {},
'attachment': {},
'realmauditlog': {},
'recipient_to_huddle_map': {},
'userhotspot': {},
'mutedtopic': {},
'service': {},
'usergroup': {},
'usergroupmembership': {},
'botstoragedata': {},
'botconfigdata': {},
} # type: Dict[str, Dict[int, int]]
id_map_to_list = {
'huddle_to_user_list': {},
} # type: Dict[str, Dict[int, List[int]]]
path_maps = {
'attachment_path': {},
} # type: Dict[str, Dict[str, str]]
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
if table not in ID_MAP:
raise Exception('''
Table %s is not initialized in ID_MAP, which could
mean that we have not thought through circular
dependencies.
''' % (table,))
ID_MAP[table][old_id] = new_id
def fix_datetime_fields(data: TableData, table: TableName) -> None:
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=timezone_utc)
def fix_upload_links(data: TableData, message_table: TableName) -> None:
"""
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
"""
for message in data[message_table]:
if message['has_attachment'] is True:
for key, value in path_maps['attachment_path'].items():
if key in message['content']:
message['content'] = message['content'].replace(key, value)
if message['rendered_content']:
message['rendered_content'] = message['rendered_content'].replace(key, value)
def create_subscription_events(data: TableData, realm_id: int) -> None:
"""
When the export data doesn't contain the table `zerver_realmauditlog`,
this function creates RealmAuditLog objects for `subscription_created`
type event for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service.
"""
all_subscription_logs = []
# from bulk_add_subscriptions in lib/actions
event_last_message_id = Message.objects.aggregate(Max('id'))['id__max']
if event_last_message_id is None:
event_last_message_id = -1
event_time = timezone_now()
recipient_id_to_stream_id = {
d['id']: d['type_id']
for d in data['zerver_recipient']
if d['type'] == Recipient.STREAM
}
for sub in data['zerver_subscription']:
recipient_id = sub['recipient_id']
stream_id = recipient_id_to_stream_id.get(recipient_id)
if stream_id is None:
continue
user_id = sub['user_profile_id']
all_subscription_logs.append(RealmAuditLog(realm_id=realm_id,
acting_user_id=user_id,
modified_user_id=user_id,
modified_stream_id=stream_id,
event_last_message_id=event_last_message_id,
event_time=event_time,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED))
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def fix_service_tokens(data: TableData, table: TableName) -> None:
"""
The tokens in the services are created by 'generate_api_key'.
As the tokens are unique, they should be re-created for the imports.
"""
for item in data[table]:
item['token'] = generate_api_key()
def process_huddle_hash(data: TableData, table: TableName) -> None:
"""
Build new huddle hashes with the updated ids of the users
"""
for huddle in data[table]:
user_id_list = id_map_to_list['huddle_to_user_list'][huddle['id']]
huddle['huddle_hash'] = get_huddle_hash(user_id_list)
def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
"""
Extract the IDs of the user_profiles involved in a huddle from the subscription object
This helps to generate a unique huddle hash from the updated user_profile ids
"""
id_map_to_list['huddle_to_user_list'] = {
value: [] for value in ID_MAP['recipient_to_huddle_map'].values()}
for subscription in data[table]:
if subscription['recipient'] in ID_MAP['recipient_to_huddle_map']:
huddle_id = ID_MAP['recipient_to_huddle_map'][subscription['recipient']]
id_map_to_list['huddle_to_user_list'][huddle_id].append(subscription['user_profile_id'])
def fix_customprofilefield(data: TableData) -> None:
"""
In CustomProfileField with 'field_type' like 'USER', the IDs need to be
re-mapped.
"""
field_type_USER_id_list = []
for item in data['zerver_customprofilefield']:
if item['field_type'] == CustomProfileField.USER:
field_type_USER_id_list.append(item['id'])
for item in data['zerver_customprofilefieldvalue']:
if item['field_id'] in field_type_USER_id_list:
old_user_id_list = ujson.loads(item['value'])
new_id_list = re_map_foreign_keys_many_to_many_internal(
table='zerver_customprofilefieldvalue',
field_name='value',
related_table='user_profile',
old_id_list=old_user_id_list)
item['value'] = ujson.dumps(new_id_list)
class FakeMessage:
'''
We just need a stub object for do_render_markdown
to write stuff to.
'''
pass
def fix_message_rendered_content(realm: Realm,
sender_map: Dict[int, Record],
messages: List[Record]) -> None:
"""
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform.
"""
for message in messages:
if message['rendered_content'] is not None:
# For Zulip->Zulip imports, we use the original rendered markdown.
continue
message_object = FakeMessage()
try:
content = message['content']
sender_id = message['sender_id']
sender = sender_map[sender_id]
sent_by_bot = sender['is_bot']
translate_emoticons = sender['translate_emoticons']
# We don't handle alert words on import from third-party
# platforms, since they generally don't have an "alert
# words" type feature, and notifications aren't important anyway.
realm_alert_words = dict() # type: RealmAlertWords
message_user_ids = set() # type: Set[int]
rendered_content = do_render_markdown(
message=cast(Message, message_object),
content=content,
realm=realm,
realm_alert_words=realm_alert_words,
message_user_ids=message_user_ids,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
)
            assert rendered_content is not None
message['rendered_content'] = rendered_content
message['rendered_content_version'] = bugdown_version
        except Exception:
            # This generally happens with two possible causes:
            # * rendering markdown throwing an uncaught exception
            # * rendering markdown failing with the exception being
            #   caught in bugdown (which then returns None, causing the
            #   rendered_content assert above to fire).
            logging.warning("Error in markdown rendering for message ID %s; continuing" % (message['id'],))
def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
"""
id_list = []
for item in data[table]:
id_list.append(item["id"])
return id_list
def idseq(model_class: Any) -> str:
if model_class == RealmDomain:
return 'zerver_realmalias_id_seq'
elif model_class == BotStorageData:
return 'zerver_botuserstatedata_id_seq'
elif model_class == BotConfigData:
return 'zerver_botuserconfigdata_id_seq'
return '{}_id_seq'.format(model_class._meta.db_table)
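# For every other model this is the default Django sequence name, e.g.
# idseq(Stream) == 'zerver_stream_id_seq'. The special cases above appear to
# exist because those models were renamed (e.g. RealmAlias -> RealmDomain)
# while their underlying Postgres sequences kept the old names.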
def allocate_ids(model_class: Any, count: int) -> List[int]:
"""
    Increases the sequence number for a given table by the number of objects
    being imported into that table, which reserves a range of ids that the
    converted objects (e.g. from a Slack export) can safely be imported into.
"""
conn = connection.cursor()
sequence = idseq(model_class)
conn.execute("select nextval('%s') from generate_series(1,%s)" %
(sequence, str(count)))
query = conn.fetchall() # Each element in the result is a tuple like (5,)
conn.close()
# convert List[Tuple[int]] to List[int]
return [item[0] for item in query]
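# Sketch with a hypothetical sequence state: if zerver_stream_id_seq last
# returned 4, then allocate_ids(Stream, 3) effectively runs
#     select nextval('zerver_stream_id_seq') from generate_series(1,3)
# and returns [5, 6, 7], leaving the sequence at 7 so that concurrent
# inserts cannot collide with the reserved range.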
def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
'''
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
'''
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
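# Example: a row exported as {'realm': 4, ...} becomes {'realm_id': 4, ...},
# matching the column name Django uses for the underlying foreign key.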
def re_map_foreign_keys(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
"""
    This is a wrapper function for all the realm data tables; only the avatar
    and attachment records need to go through the internal function directly,
    because of the difference in data format (TableData for realm data tables
    versus List[Record] for the avatar and attachment records).
"""
# See comments in bulk_import_user_message_data.
    assert 'usermessage' not in related_table
re_map_foreign_keys_internal(data[table], table, field_name, related_table, verbose, id_field,
recipient_field, reaction_field)
def re_map_foreign_keys_internal(data_table: List[Record],
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
'''
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
'''
lookup_table = ID_MAP[related_table]
    for item in data_table:
        old_id = item[field_name]
        if recipient_field:
            if related_table == "stream" and item['type'] == Recipient.STREAM:
                pass
            elif related_table == "user_profile" and item['type'] == Recipient.PERSONAL:
                pass
            elif related_table == "huddle" and item['type'] == Recipient.HUDDLE:
                # Save the recipient id with the huddle id, so that we can
                # extract the user_profile ids involved in a huddle with the
                # help of the subscription object; see
                # get_huddles_from_subscription.
                ID_MAP['recipient_to_huddle_map'][item['id']] = lookup_table[old_id]
            else:
                continue
if reaction_field:
if item['reaction_type'] == Reaction.REALM_EMOJI:
old_id = int(old_id)
else:
continue
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s' % (table,
field_name + '_id',
old_id,
new_id))
else:
new_id = old_id
if not id_field:
item[field_name + "_id"] = new_id
del item[field_name]
else:
if reaction_field:
item[field_name] = str(new_id)
else:
item[field_name] = new_id
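# Example with hypothetical ids: with ID_MAP['user_profile'] == {3: 103},
#     re_map_foreign_keys(data, 'zerver_subscription', 'user_profile',
#                         related_table='user_profile')
# rewrites {'user_profile': 3} to {'user_profile_id': 103}; old ids that are
# missing from the lookup table pass through unchanged.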
def re_map_foreign_keys_many_to_many(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False) -> None:
"""
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields.
"""
for item in data[table]:
old_id_list = item[field_name]
new_id_list = re_map_foreign_keys_many_to_many_internal(
table, field_name, related_table, old_id_list, verbose)
item[field_name] = new_id_list
del item[field_name]
def re_map_foreign_keys_many_to_many_internal(table: TableName,
field_name: Field,
related_table: TableName,
old_id_list: List[int],
verbose: bool=False) -> List[int]:
"""
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list.
"""
lookup_table = ID_MAP[related_table]
new_id_list = []
for old_id in old_id_list:
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s' % (table,
field_name + '_id',
old_id,
new_id))
else:
new_id = old_id
new_id_list.append(new_id)
return new_id_list
def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
for item in data[table]:
item[field_name] = item[field_name + '_mask']
del item[field_name + '_mask']
def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:
"""Used to fixup the authentication_methods bitfield to be a string"""
for item in data[table]:
values_as_bitstring = ''.join(['1' if field[1] else '0' for field in
item[field_name]])
values_as_int = int(values_as_bitstring, 2)
item[field_name] = values_as_int
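# Example: an exported value of
#     [('Email', True), ('GitHub', False), ('LDAP', True)]
# serializes to the bitstring '101', i.e. the integer 5 that is stored on
# the model's BitField column.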
def get_db_table(model_class: Any) -> str:
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
return model_class._meta.db_table
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
table = get_db_table(model)
    # Important: remapping usermessage rows is
    # not only unnecessary, it's expensive and can cause
    # memory errors. We don't even use ids from ID_MAP.
    assert 'usermessage' not in table
old_id_list = current_table_ids(data, table)
allocated_id_list = allocate_ids(model, len(data[table]))
for item in range(len(data[table])):
update_id_map(related_table, old_id_list[item], allocated_id_list[item])
re_map_foreign_keys(data, table, 'id', related_table=related_table, id_field=True)
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
model = UserMessage
table = 'zerver_usermessage'
lst = data[table]
# IMPORTANT NOTE: We do not use any primary id
# data from either the import itself or ID_MAP.
# We let the DB itself generate ids. Note that
# no tables use user_message.id as a foreign key,
# so we can safely avoid all re-mapping complexity.
def process_batch(items: List[Dict[str, Any]]) -> None:
        ums = [
            UserMessageLite(
                user_profile_id=item['user_profile_id'],
                message_id=item['message_id'],
                flags=item['flags'],
            )
            for item in items
        ]
bulk_insert_ums(ums)
chunk_size = 10000
process_list_in_batches(
lst=lst,
chunk_size=chunk_size,
process_batch=process_batch,
)
logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
table = get_db_table(model)
# TODO, deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s." % (model, table))
else:
logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check if a Client object already exists, and remap all Client IDs to
# the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
for item in data[table]:
try:
client = Client.objects.get(name=item['name'])
except Client.DoesNotExist:
client = Client.objects.create(name=item['name'])
update_id_map(table='client', old_id=item['id'], new_id=client.id)
def import_uploads_local(import_dir: Path, processing_avatars: bool=False,
processing_emojis: bool=False) -> None:
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read())
re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
id_field=True)
if not processing_emojis:
re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
related_table="user_profile", id_field=True)
for record in records:
if processing_avatars:
# For avatars, we need to rehash the user ID with the
# new server's avatar salt
avatar_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path)
if record['s3_path'].endswith('.original'):
file_path += '.original'
else:
file_path += '.png'
elif processing_emojis:
# For emojis we follow the function 'upload_emoji_image'
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=record['realm_id'],
emoji_file_name=record['file_name'])
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", emoji_path)
else:
# Should be kept in sync with its equivalent in zerver/lib/uploads in the
# function 'upload_message_image'
s3_file_name = "/".join([
str(record['realm_id']),
random_name(18),
sanitize_name(os.path.basename(record['path']))
])
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", s3_file_name)
path_maps['attachment_path'][record['path']] = s3_file_name
orig_file_path = os.path.join(import_dir, record['path'])
os.makedirs(os.path.dirname(file_path), exist_ok=True)
shutil.copy(orig_file_path, file_path)
if processing_avatars:
# Ensure that we have medium-size avatar images for every
# avatar. TODO: This implementation is hacky, both in that it
# does get_user_profile_by_id for each user, and in that it
# might be better to require the export to just have these.
upload_backend = LocalUploadBackend()
for record in records:
if record['s3_path'].endswith('.original'):
user_profile = get_user_profile_by_id(record['user_profile_id'])
avatar_path = user_avatar_path_from_ids(user_profile.id, record['realm_id'])
medium_file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_path) + '-medium.png'
if os.path.exists(medium_file_path):
# We remove the image here primarily to deal with
# issues when running the import script multiple
# times in development (where one might reuse the
# same realm ID from a previous iteration).
os.remove(medium_file_path)
upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
def import_uploads_s3(bucket_name: str, import_dir: Path, processing_avatars: bool=False,
processing_emojis: bool=False) -> None:
upload_backend = S3UploadBackend()
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read())
re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
id_field=True)
timestamp = datetime_to_timestamp(timezone_now())
if not processing_emojis:
re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
related_table="user_profile", id_field=True)
for record in records:
key = Key(bucket)
if processing_avatars:
            # For avatars, we need to rehash the user ID with the
            # new server's avatar salt
avatar_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
key.key = avatar_path
if record['s3_path'].endswith('.original'):
key.key += '.original'
elif processing_emojis:
# For emojis we follow the function 'upload_emoji_image'
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=record['realm_id'],
emoji_file_name=record['file_name'])
key.key = emoji_path
record['last_modified'] = timestamp
else:
# Should be kept in sync with its equivalent in zerver/lib/uploads in the
# function 'upload_message_image'
s3_file_name = "/".join([
str(record['realm_id']),
random_name(18),
sanitize_name(os.path.basename(record['path']))
])
key.key = s3_file_name
path_maps['attachment_path'][record['s3_path']] = s3_file_name
# Exported custom emoji from tools like Slack don't have
# the data for what user uploaded them in `user_profile_id`.
if not processing_emojis:
user_profile_id = int(record['user_profile_id'])
# Support email gateway bot and other cross-realm messages
if user_profile_id in ID_MAP["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
user_profile_id = ID_MAP["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
key.set_metadata("user_profile_id", str(user_profile.id))
if 'last_modified' in record:
key.set_metadata("orig_last_modified", record['last_modified'])
key.set_metadata("realm_id", str(record['realm_id']))
# Zulip exports will always have a content-type, but third-party exports might not.
content_type = record.get("content_type")
if content_type is None:
content_type = guess_type(record['s3_path'])[0]
headers = {'Content-Type': content_type}
key.set_contents_from_filename(os.path.join(import_dir, record['path']), headers=headers)
if processing_avatars:
# Ensure that we have medium-size avatar images for every
# avatar. TODO: This implementation is hacky, both in that it
# does get_user_profile_by_id for each user, and in that it
# might be better to require the export to just have these.
upload_backend = S3UploadBackend()
for record in records:
if record['s3_path'].endswith('.original'):
user_profile = get_user_profile_by_id(record['user_profile_id'])
upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
def import_uploads(import_dir: Path, processing_avatars: bool=False,
processing_emojis: bool=False) -> None:
if processing_avatars and processing_emojis:
raise AssertionError("Cannot import avatars and emojis at the same time!")
if processing_avatars:
logging.info("Importing avatars")
elif processing_emojis:
logging.info("Importing emojis")
else:
logging.info("Importing uploaded files")
if settings.LOCAL_UPLOADS_DIR:
import_uploads_local(import_dir, processing_avatars=processing_avatars,
processing_emojis=processing_emojis)
else:
if processing_avatars or processing_emojis:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
import_uploads_s3(bucket_name, import_dir, processing_avatars=processing_avatars,
processing_emojis=processing_emojis)
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str) -> Realm:
logging.info("Importing realm dump %s" % (import_dir,))
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
logging.info("Importing realm data from %s" % (realm_data_filename,))
with open(realm_data_filename) as f:
data = ujson.load(f)
sort_by_date = data.get('sort_by_date', False)
bulk_import_client(data, Client, 'zerver_client')
# We don't import the Stream model yet, since it depends on Realm,
# which isn't imported yet. But we need the Stream model IDs for
# notifications_stream.
update_model_ids(Stream, data, 'stream')
re_map_foreign_keys(data, 'zerver_realm', 'notifications_stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realm', 'signup_notifications_stream', related_table="stream")
fix_datetime_fields(data, 'zerver_realm')
# Fix realm subdomain information
data['zerver_realm'][0]['string_id'] = subdomain
data['zerver_realm'][0]['name'] = subdomain
fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
update_model_ids(Realm, data, 'realm')
realm = Realm(**data['zerver_realm'][0])
if settings.BILLING_ENABLED:
realm.plan_type = Realm.LIMITED
else:
realm.plan_type = Realm.SELF_HOSTED
if realm.notifications_stream_id is not None:
notifications_stream_id = int(realm.notifications_stream_id) # type: Optional[int]
else:
notifications_stream_id = None
realm.notifications_stream_id = None
if realm.signup_notifications_stream_id is not None:
signup_notifications_stream_id = int(realm.signup_notifications_stream_id) # type: Optional[int]
else:
signup_notifications_stream_id = None
realm.signup_notifications_stream_id = None
realm.save()
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, 'zerver_stream')
re_map_foreign_keys(data, 'zerver_stream', 'realm', related_table="realm")
bulk_import_model(data, Stream)
realm.notifications_stream_id = notifications_stream_id
realm.signup_notifications_stream_id = signup_notifications_stream_id
realm.save()
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
for item in data['zerver_userprofile_crossrealm']:
logging.info("Adding to ID map: %s %s" % (item['id'], get_system_bot(item['email']).id))
new_user_id = get_system_bot(item['email']).id
update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
update_id_map(table='recipient', old_id=item['recipient_id'], new_id=new_recipient_id)
# Merge in zerver_userprofile_mirrordummy
data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
del data['zerver_userprofile_mirrordummy']
data['zerver_userprofile'].sort(key=lambda r: r['id'])
# To remap foreign key for UserProfile.last_active_message_id
update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)
fix_datetime_fields(data, 'zerver_userprofile')
update_model_ids(UserProfile, data, 'user_profile')
re_map_foreign_keys(data, 'zerver_userprofile', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_sending_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_events_register_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'last_active_message_id',
related_table="message", id_field=True)
for user_profile_dict in data['zerver_userprofile']:
user_profile_dict['password'] = None
user_profile_dict['api_key'] = generate_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict['user_permissions']
del user_profile_dict['groups']
user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
re_map_foreign_keys(data, 'zerver_defaultstream', 'stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realmemoji', 'author', related_table="user_profile")
for (table, model, related_table) in realm_tables:
re_map_foreign_keys(data, table, 'realm', related_table="realm")
update_model_ids(model, data, related_table)
bulk_import_model(data, model)
if 'zerver_huddle' in data:
update_model_ids(Huddle, data, 'huddle')
# We don't import Huddle yet, since we don't have the data to
# compute huddle hashes until we've imported some of the
# tables below.
# TODO: double-check this.
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="stream",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="user_profile",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="huddle",
recipient_field=True, id_field=True)
update_model_ids(Recipient, data, 'recipient')
bulk_import_model(data, Recipient)
re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
get_huddles_from_subscription(data, 'zerver_subscription')
re_map_foreign_keys(data, 'zerver_subscription', 'recipient', related_table="recipient")
update_model_ids(Subscription, data, 'subscription')
bulk_import_model(data, Subscription)
if 'zerver_realmauditlog' in data:
fix_datetime_fields(data, 'zerver_realmauditlog')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'acting_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_stream',
related_table="stream")
update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
bulk_import_model(data, RealmAuditLog)
else:
logging.info('about to call create_subscription_events')
create_subscription_events(
data=data,
realm_id=realm.id,
)
logging.info('done with create_subscription_events')
if 'zerver_huddle' in data:
process_huddle_hash(data, 'zerver_huddle')
bulk_import_model(data, Huddle)
if 'zerver_userhotspot' in data:
fix_datetime_fields(data, 'zerver_userhotspot')
re_map_foreign_keys(data, 'zerver_userhotspot', 'user', related_table='user_profile')
update_model_ids(UserHotspot, data, 'userhotspot')
bulk_import_model(data, UserHotspot)
if 'zerver_mutedtopic' in data:
re_map_foreign_keys(data, 'zerver_mutedtopic', 'user_profile', related_table='user_profile')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'stream', related_table='stream')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'recipient', related_table='recipient')
update_model_ids(MutedTopic, data, 'mutedtopic')
bulk_import_model(data, MutedTopic)
if 'zerver_service' in data:
re_map_foreign_keys(data, 'zerver_service', 'user_profile', related_table='user_profile')
fix_service_tokens(data, 'zerver_service')
update_model_ids(Service, data, 'service')
bulk_import_model(data, Service)
if 'zerver_usergroup' in data:
re_map_foreign_keys(data, 'zerver_usergroup', 'realm', related_table='realm')
re_map_foreign_keys_many_to_many(data, 'zerver_usergroup',
'members', related_table='user_profile')
update_model_ids(UserGroup, data, 'usergroup')
bulk_import_model(data, UserGroup)
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_group', related_table='usergroup')
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_profile', related_table='user_profile')
update_model_ids(UserGroupMembership, data, 'usergroupmembership')
bulk_import_model(data, UserGroupMembership)
if 'zerver_botstoragedata' in data:
re_map_foreign_keys(data, 'zerver_botstoragedata', 'bot_profile', related_table='user_profile')
update_model_ids(BotStorageData, data, 'botstoragedata')
bulk_import_model(data, BotStorageData)
if 'zerver_botconfigdata' in data:
re_map_foreign_keys(data, 'zerver_botconfigdata', 'bot_profile', related_table='user_profile')
update_model_ids(BotConfigData, data, 'botconfigdata')
bulk_import_model(data, BotConfigData)
fix_datetime_fields(data, 'zerver_userpresence')
re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
update_model_ids(UserPresence, data, 'user_presence')
bulk_import_model(data, UserPresence)
fix_datetime_fields(data, 'zerver_useractivity')
re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
update_model_ids(UserActivity, data, 'useractivity')
bulk_import_model(data, UserActivity)
fix_datetime_fields(data, 'zerver_useractivityinterval')
re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
update_model_ids(UserActivityInterval, data, 'useractivityinterval')
bulk_import_model(data, UserActivityInterval)
re_map_foreign_keys(data, 'zerver_customprofilefield', 'realm', related_table="realm")
update_model_ids(CustomProfileField, data, related_table="customprofilefield")
bulk_import_model(data, CustomProfileField)
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'user_profile',
related_table="user_profile")
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'field',
related_table="customprofilefield")
fix_customprofilefield(data)
update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
bulk_import_model(data, CustomProfileFieldValue)
# Import uploaded files and avatars
import_uploads(os.path.join(import_dir, "avatars"), processing_avatars=True)
import_uploads(os.path.join(import_dir, "uploads"))
    # We need this check because the emoji files are only present in data
    # from third-party importers like Slack; a Zulip export doesn't include
    # an emoji directory.
if os.path.exists(os.path.join(import_dir, "emoji")):
import_uploads(os.path.join(import_dir, "emoji"), processing_emojis=True)
sender_map = {
user['id']: user
for user in data['zerver_userprofile']
}
# Import zerver_message and zerver_usermessage
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
re_map_foreign_keys(data, 'zerver_reaction', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_reaction', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_reaction', 'emoji_code', related_table="realmemoji", id_field=True,
reaction_field=True)
update_model_ids(Reaction, data, 'reaction')
bulk_import_model(data, Reaction)
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s" % (fn,))
with open(fn) as f:
data = ujson.load(f)
import_attachments(data)
return realm
# create_users and do_import_system_bots differ from their equivalents in
# zerver/management/commands/initialize_voyager_db.py because here we check
# whether each bot already exists and only create users for those that don't.
def do_import_system_bots(realm: Any) -> None:
internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.INTERNAL_BOTS]
create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
names = [(settings.FEEDBACK_BOT_NAME, settings.FEEDBACK_BOT)]
create_users(realm, names, bot_type=UserProfile.DEFAULT_BOT)
print("Finished importing system bots.")
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
short_name = email_to_username(email)
if not UserProfile.objects.filter(email=email):
user_set.add((email, full_name, short_name, True))
bulk_create_users(realm, user_set, bot_type)
def update_message_foreign_keys(import_dir: Path,
sort_by_date: bool) -> None:
old_id_list = get_incoming_message_ids(
import_dir=import_dir,
sort_by_date=sort_by_date,
)
count = len(old_id_list)
new_id_list = allocate_ids(model_class=Message, count=count)
for old_id, new_id in zip(old_id_list, new_id_list):
update_id_map(
table='message',
old_id=old_id,
new_id=new_id,
)
    # We don't touch user_message keys here; that happens later when
    # we actually read the files a second time to get the real data.
def get_incoming_message_ids(import_dir: Path,
sort_by_date: bool) -> List[int]:
    '''
    This function reads in our entire collection of message
    ids, which can be millions of integers for some installations,
    and then sorts the list. Sorting is necessary to ensure
    that the sort order of incoming ids matches the sort order
    of pub_date, which isn't always guaranteed by our
    utilities that convert third-party chat data. We also
    need to move our ids to a new range if we're dealing
    with a server that has data for other realms.
    '''
if sort_by_date:
tups = list() # type: List[Tuple[int, int]]
else:
message_ids = [] # type: List[int]
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
if not os.path.exists(message_filename):
break
with open(message_filename) as f:
data = ujson.load(f)
# Aggressively free up memory.
del data['zerver_usermessage']
for row in data['zerver_message']:
# We truncate pub_date to int to theoretically
# save memory and speed up the sort. For
# Zulip-to-Zulip imports, the
# message_id will generally be a good tiebreaker.
# If we occasionally mis-order the ids for two
# messages from the same second, it's not the
# end of the world, as it's likely those messages
            # arrived at the original server in somewhat
# arbitrary order.
message_id = row['id']
if sort_by_date:
pub_date = int(row['pub_date'])
tup = (pub_date, message_id)
tups.append(tup)
else:
message_ids.append(message_id)
dump_file_id += 1
if sort_by_date:
tups.sort()
message_ids = [tup[1] for tup in tups]
return message_ids
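# Sketch of the sort_by_date path with hypothetical rows: messages with
# (pub_date, id) tuples [(1500000300, 2), (1500000100, 9)] sort to
# [(1500000100, 9), (1500000300, 2)], so the returned id order [9, 2]
# follows pub_date, with the original message id as a tiebreaker for
# messages sent in the same second.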
def import_message_data(realm: Realm,
sender_map: Dict[int, Record],
import_dir: Path) -> None:
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
if not os.path.exists(message_filename):
break
with open(message_filename) as f:
data = ujson.load(f)
logging.info("Importing message dump %s" % (message_filename,))
re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
fix_datetime_fields(data, 'zerver_message')
        # Update the message content to use the new attachment URLs.
fix_upload_links(data, 'zerver_message')
# We already create mappings for zerver_message ids
# in update_message_foreign_keys(), so here we simply
# apply them.
message_id_map = ID_MAP['message']
for row in data['zerver_message']:
row['id'] = message_id_map[row['id']]
for row in data['zerver_usermessage']:
            assert row['message'] in message_id_map
fix_message_rendered_content(
realm=realm,
sender_map=sender_map,
messages=data['zerver_message'],
)
logging.info("Successfully rendered markdown for message batch")
# A LOT HAPPENS HERE.
# This is where we actually import the message data.
bulk_import_model(data, Message)
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
re_map_foreign_keys(data, 'zerver_usermessage', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
fix_bitfield_keys(data, 'zerver_usermessage', 'flags')
bulk_import_user_message_data(data, dump_file_id)
dump_file_id += 1
def import_attachments(data: TableData) -> None:
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, 'zerver_attachment')
re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_attachment', 'realm', related_table="realm")
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
# Model that has the ManyToManyField. It is assumed here
# the child models have been loaded, but we are in turn
# responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = 'zerver_attachment'
parent_singular = 'attachment'
child_singular = 'message'
child_plural = 'messages'
m2m_table_name = 'zerver_attachment_messages'
parent_id = 'attachment_id'
child_id = 'message_id'
update_model_ids(parent_model, data, 'attachment')
# We don't bulk_import_model yet, because we need to first compute
# the many-to-many for this table.
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows = [] # type: List[Record]
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row = {} # type: Record
m2m_row[parent_singular] = parent_row['id']
m2m_row[child_singular] = ID_MAP['message'][fk_id]
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data = {m2m_table_name: m2m_rows} # type: TableData
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Update 'path_id' for the attachments
for attachment in data[parent_db_table_name]:
attachment['path_id'] = path_maps['attachment_path'][attachment['path_id']]
# Next, load the parent rows.
bulk_import_model(data, parent_model)
# Now, go back to our m2m rows.
# TODO: Do this the kosher Django way. We may find a
# better way to do this in Django 1.9 particularly.
with connection.cursor() as cursor:
sql_template = '''
insert into %s (%s, %s) values(%%s, %%s);''' % (m2m_table_name,
parent_id,
child_id)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
cursor.executemany(sql_template, tups)
logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
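# Sketch of the m2m step above with hypothetical ids: an attachment row with
# (already remapped) id 1 and 'messages' == [30, 31], given
# ID_MAP['message'] == {30: 130, 31: 131}, yields the rows (1, 130) and
# (1, 131), which are inserted directly into zerver_attachment_messages
# after the parent Attachment rows have been created.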
| [
"TableName",
"int",
"int",
"TableData",
"TableName",
"TableData",
"TableName",
"TableData",
"int",
"TableData",
"TableName",
"TableData",
"TableName",
"TableData",
"TableName",
"TableData",
"Realm",
"Dict[int, Record]",
"List[Record]",
"TableData",
"TableName",
"Any",
"Any",
"int",
"TableData",
"TableName",
"Field",
"TableData",
"TableName",
"Field",
"TableName",
"List[Record]",
"TableName",
"Field",
"TableName",
"TableData",
"TableName",
"Field",
"TableName",
"TableName",
"Field",
"TableName",
"List[int]",
"TableData",
"TableName",
"Field",
"TableData",
"TableName",
"Field",
"Any",
"Any",
"TableData",
"TableName",
"TableData",
"int",
"List[Dict[str, Any]]",
"TableData",
"Any",
"TableData",
"Any",
"TableName",
"Path",
"str",
"Path",
"Path",
"Path",
"str",
"Any",
"Realm",
"Iterable[Tuple[str, str]]",
"Path",
"bool",
"Path",
"bool",
"Realm",
"Dict[int, Record]",
"Path",
"TableData"
] | [
2876,
2895,
2908,
3220,
3238,
3508,
3534,
4274,
4295,
6098,
6116,
6394,
6412,
6719,
6737,
7400,
8365,
8417,
8479,
10508,
10526,
10740,
11095,
11107,
11742,
11760,
11783,
12258,
12300,
12347,
12393,
13254,
13308,
13364,
13419,
15811,
15866,
15926,
15985,
16654,
16723,
16791,
16861,
17760,
17778,
17801,
17984,
18002,
18025,
18404,
18537,
18548,
18574,
19186,
19211,
19600,
20168,
20186,
20858,
20876,
20888,
21220,
24603,
24620,
28481,
30074,
30091,
43059,
43495,
43513,
43915,
43967,
44550,
44599,
46543,
46586,
46641,
48752
] | [
2885,
2898,
2911,
3229,
3247,
3517,
3543,
4283,
4298,
6107,
6125,
6403,
6421,
6728,
6746,
7409,
8370,
8434,
8491,
10517,
10535,
10743,
11098,
11110,
11751,
11769,
11788,
12267,
12309,
12352,
12402,
13266,
13317,
13369,
13428,
15820,
15875,
15931,
15994,
16663,
16728,
16800,
16870,
17769,
17787,
17806,
17993,
18011,
18030,
18407,
18540,
18557,
18583,
19195,
19214,
19620,
20177,
20189,
20867,
20879,
20897,
21224,
24606,
24624,
28485,
30078,
30094,
43062,
43500,
43538,
43919,
43971,
44554,
44603,
46548,
46603,
46645,
48761
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/initial_password.py |
from django.conf import settings
import hashlib
import base64
from typing import Optional
def initial_password(email: str) -> Optional[str]:
"""Given an email address, returns the initial password for that account, as
created by populate_db."""
if settings.INITIAL_PASSWORD_SALT is not None:
encoded_key = (settings.INITIAL_PASSWORD_SALT + email).encode("utf-8")
digest = hashlib.sha256(encoded_key).digest()
return base64.b64encode(digest)[:16].decode('utf-8')
else:
# None as a password for a user tells Django to set an unusable password
return None
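# Sketch, assuming INITIAL_PASSWORD_SALT is set (say, to 'salt'): the
# password for 'iago@zulip.com' is the first 16 characters of the base64
# encoding of sha256('salt' + 'iago@zulip.com'). It is thus deterministic
# across runs of populate_db, but useless to an attacker who doesn't know
# the salt.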
| [
"str"
] | [
123
] | [
126
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/integrations.py | import os
import pathlib
from typing import Dict, List, Optional, TypeVar, Any
from django.conf import settings
from django.conf.urls import url
from django.urls.resolvers import LocaleRegexProvider
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.template import loader
from zerver.templatetags.app_filters import render_markdown_path
"""This module declares all of the (documented) integrations available
in the Zulip server. The Integration class is used as part of
generating the documentation on the /integrations page, while the
WebhookIntegration class is also used to generate the URLs in
`zproject/urls.py` for webhook integrations.
To add a new non-webhook integration, add code to the INTEGRATIONS
dictionary below.
To add a new webhook integration, declare a WebhookIntegration in the
WEBHOOK_INTEGRATIONS list below (it will be automatically added to
INTEGRATIONS).
To add a new integration category, add to the CATEGORIES dict.
Over time, we expect this registry to grow additional convenience
features for writing and configuring integrations efficiently.
"""
CATEGORIES = {
'meta-integration': _('Integration frameworks'),
'continuous-integration': _('Continuous integration'),
'customer-support': _('Customer support'),
'deployment': _('Deployment'),
'communication': _('Communication'),
'financial': _('Financial'),
'hr': _('HR'),
'marketing': _('Marketing'),
'misc': _('Miscellaneous'),
'monitoring': _('Monitoring tools'),
'project-management': _('Project management'),
'productivity': _('Productivity'),
'version-control': _('Version control'),
'bots': _('Interactive bots'),
} # type: Dict[str, str]
class Integration:
DEFAULT_LOGO_STATIC_PATH_PNG = 'static/images/integrations/logos/{name}.png'
DEFAULT_LOGO_STATIC_PATH_SVG = 'static/images/integrations/logos/{name}.svg'
def __init__(self, name: str, client_name: str, categories: List[str],
logo: Optional[str]=None, secondary_line_text: Optional[str]=None,
display_name: Optional[str]=None, doc: Optional[str]=None,
stream_name: Optional[str]=None, legacy: Optional[bool]=False) -> None:
self.name = name
self.client_name = client_name
self.secondary_line_text = secondary_line_text
self.legacy = legacy
self.doc = doc
for category in categories:
if category not in CATEGORIES:
raise KeyError( # nocoverage
'INTEGRATIONS: ' + name + ' - category \'' +
category + '\' is not a key in CATEGORIES.'
)
self.categories = list(map((lambda c: CATEGORIES[c]), categories))
if logo is None:
logo = self.get_logo_url()
self.logo = logo
if display_name is None:
display_name = name.title()
self.display_name = display_name
if stream_name is None:
stream_name = self.name
self.stream_name = stream_name
def is_enabled(self) -> bool:
return True
def get_logo_url(self) -> Optional[str]:
logo_file_path_svg = str(pathlib.PurePath(
settings.STATIC_ROOT,
*self.DEFAULT_LOGO_STATIC_PATH_SVG.format(name=self.name).split('/')[1:]
))
logo_file_path_png = str(pathlib.PurePath(
settings.STATIC_ROOT,
*self.DEFAULT_LOGO_STATIC_PATH_PNG.format(name=self.name).split('/')[1:]
))
if os.path.isfile(logo_file_path_svg):
return self.DEFAULT_LOGO_STATIC_PATH_SVG.format(name=self.name)
elif os.path.isfile(logo_file_path_png):
return self.DEFAULT_LOGO_STATIC_PATH_PNG.format(name=self.name)
return None
class BotIntegration(Integration):
DEFAULT_LOGO_STATIC_PATH_PNG = 'static/generated/bots/{name}/logo.png'
DEFAULT_LOGO_STATIC_PATH_SVG = 'static/generated/bots/{name}/logo.svg'
ZULIP_LOGO_STATIC_PATH_PNG = 'static/images/logo/zulip-icon-128x128.png'
DEFAULT_DOC_PATH = '{name}/doc.md'
def __init__(self, name: str, categories: List[str], logo: Optional[str]=None,
secondary_line_text: Optional[str]=None, display_name: Optional[str]=None,
doc: Optional[str]=None) -> None:
super().__init__(
name,
client_name=name,
categories=categories,
secondary_line_text=secondary_line_text,
)
if logo is None:
logo_url = self.get_logo_url()
if logo_url is not None:
logo = logo_url
else:
# TODO: Add a test for this by initializing one in a test.
logo = self.ZULIP_LOGO_STATIC_PATH_PNG # nocoverage
self.logo = logo
if display_name is None:
display_name = "{} Bot".format(name.title()) # nocoverage
else:
display_name = "{} Bot".format(display_name)
self.display_name = display_name
if doc is None:
doc = self.DEFAULT_DOC_PATH.format(name=name)
self.doc = doc
class EmailIntegration(Integration):
def is_enabled(self) -> bool:
return settings.EMAIL_GATEWAY_PATTERN != ""
class WebhookIntegration(Integration):
DEFAULT_FUNCTION_PATH = 'zerver.webhooks.{name}.view.api_{name}_webhook'
DEFAULT_URL = 'api/v1/external/{name}'
DEFAULT_CLIENT_NAME = 'Zulip{name}Webhook'
DEFAULT_DOC_PATH = '{name}/doc.{ext}'
def __init__(self, name: str, categories: List[str], client_name: Optional[str]=None,
logo: Optional[str]=None, secondary_line_text: Optional[str]=None,
function: Optional[str]=None, url: Optional[str]=None,
display_name: Optional[str]=None, doc: Optional[str]=None,
stream_name: Optional[str]=None, legacy: Optional[bool]=None) -> None:
if client_name is None:
client_name = self.DEFAULT_CLIENT_NAME.format(name=name.title())
super().__init__(
name,
client_name,
categories,
logo=logo,
secondary_line_text=secondary_line_text,
display_name=display_name,
stream_name=stream_name,
legacy=legacy
)
if function is None:
function = self.DEFAULT_FUNCTION_PATH.format(name=name)
if isinstance(function, str):
function = import_string(function)
self.function = function
if url is None:
url = self.DEFAULT_URL.format(name=name)
self.url = url
if doc is None:
doc = self.DEFAULT_DOC_PATH.format(name=name, ext='md')
self.doc = doc
@property
def url_object(self) -> LocaleRegexProvider:
return url(self.url, self.function)
class HubotIntegration(Integration):
GIT_URL_TEMPLATE = "https://github.com/hubot-scripts/hubot-{}"
def __init__(self, name: str, categories: List[str],
display_name: Optional[str]=None, logo: Optional[str]=None,
logo_alt: Optional[str]=None, git_url: Optional[str]=None,
legacy: bool=False) -> None:
if logo_alt is None:
logo_alt = "{} logo".format(name.title())
self.logo_alt = logo_alt
if git_url is None:
git_url = self.GIT_URL_TEMPLATE.format(name)
self.hubot_docs_url = git_url
super().__init__(
name, name, categories,
logo=logo, display_name=display_name,
doc = 'zerver/integrations/hubot_common.md',
legacy=legacy
)
class GithubIntegration(WebhookIntegration):
"""
    We need this class to avoid creating a url object for the GitHub
    integration. We want one generic url with a dispatch function for the
    GitHub service and the GitHub webhook.
"""
def __init__(self, name: str, categories: List[str], client_name: Optional[str]=None,
logo: Optional[str]=None, secondary_line_text: Optional[str]=None,
function: Optional[str]=None, url: Optional[str]=None,
display_name: Optional[str]=None, doc: Optional[str]=None,
stream_name: Optional[str]=None, legacy: Optional[bool]=False) -> None:
url = self.DEFAULT_URL.format(name='github')
super().__init__(
name,
categories,
client_name=client_name,
logo=logo,
secondary_line_text=secondary_line_text,
function=function,
url=url,
display_name=display_name,
doc=doc,
stream_name=stream_name,
legacy=legacy
)
@property
def url_object(self) -> None:
return
class EmbeddedBotIntegration(Integration):
'''
This class acts as a registry for bots verified as safe
and valid such that these are capable of being deployed on the server.
'''
DEFAULT_CLIENT_NAME = 'Zulip{name}EmbeddedBot'
def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
assert kwargs.get("client_name") is None
client_name = self.DEFAULT_CLIENT_NAME.format(name=name.title())
super().__init__(
name, client_name, *args, **kwargs)
EMBEDDED_BOTS = [
EmbeddedBotIntegration('converter', []),
EmbeddedBotIntegration('encrypt', []),
EmbeddedBotIntegration('helloworld', []),
EmbeddedBotIntegration('virtual_fs', []),
EmbeddedBotIntegration('giphy', []),
EmbeddedBotIntegration('followup', []),
] # type: List[EmbeddedBotIntegration]
WEBHOOK_INTEGRATIONS = [
WebhookIntegration('airbrake', ['monitoring']),
WebhookIntegration('ansibletower', ['deployment'], display_name='Ansible Tower'),
WebhookIntegration('appfollow', ['customer-support'], display_name='AppFollow'),
WebhookIntegration('appveyor', ['continuous-integration'], display_name='AppVeyor'),
WebhookIntegration('beanstalk', ['version-control'], stream_name='commits'),
WebhookIntegration('basecamp', ['project-management']),
WebhookIntegration('beeminder', ['misc'], display_name='Beeminder'),
WebhookIntegration(
'bitbucket2',
['version-control'],
logo='static/images/integrations/logos/bitbucket.svg',
display_name='Bitbucket',
stream_name='bitbucket'
),
WebhookIntegration(
'bitbucket',
['version-control'],
display_name='Bitbucket',
secondary_line_text='(Enterprise)',
stream_name='commits',
legacy=True
),
WebhookIntegration('circleci', ['continuous-integration'], display_name='CircleCI'),
WebhookIntegration('clubhouse', ['project-management']),
WebhookIntegration('codeship', ['continuous-integration', 'deployment']),
WebhookIntegration('crashlytics', ['monitoring']),
WebhookIntegration('dialogflow', ['customer-support'], display_name='Dialogflow'),
WebhookIntegration('delighted', ['customer-support', 'marketing'], display_name='Delighted'),
WebhookIntegration(
'deskdotcom',
['customer-support'],
logo='static/images/integrations/logos/deskcom.png',
display_name='Desk.com',
stream_name='desk'
),
WebhookIntegration('dropbox', ['productivity'], display_name='Dropbox'),
WebhookIntegration('flock', ['customer-support'], display_name='Flock'),
WebhookIntegration('freshdesk', ['customer-support']),
WebhookIntegration('front', ['customer-support'], display_name='Front'),
GithubIntegration(
'github',
['version-control'],
display_name='GitHub',
logo='static/images/integrations/logos/github.svg',
function='zerver.webhooks.github.view.api_github_webhook',
stream_name='github'
),
WebhookIntegration('gitlab', ['version-control'], display_name='GitLab'),
WebhookIntegration('gocd', ['continuous-integration'], display_name='GoCD'),
WebhookIntegration('gogs', ['version-control'], stream_name='commits'),
WebhookIntegration('gosquared', ['marketing'], display_name='GoSquared'),
WebhookIntegration('greenhouse', ['hr'], display_name='Greenhouse'),
WebhookIntegration('groove', ['customer-support'], display_name='Groove'),
WebhookIntegration('hellosign', ['productivity', 'hr'], display_name='HelloSign'),
WebhookIntegration('helloworld', ['misc'], display_name='Hello World'),
WebhookIntegration('heroku', ['deployment'], display_name='Heroku'),
WebhookIntegration('homeassistant', ['misc'], display_name='Home Assistant'),
WebhookIntegration(
'ifttt',
['meta-integration'],
function='zerver.webhooks.ifttt.view.api_iftt_app_webhook',
display_name='IFTTT'
),
WebhookIntegration('insping', ['monitoring'], display_name='Insping'),
WebhookIntegration('intercom', ['customer-support'], display_name='Intercom'),
WebhookIntegration('jira', ['project-management'], display_name='JIRA'),
WebhookIntegration('librato', ['monitoring']),
WebhookIntegration('mention', ['marketing'], display_name='Mention'),
WebhookIntegration('netlify', ['continuous-integration', 'deployment'], display_name='Netlify'),
WebhookIntegration('newrelic', ['monitoring'], display_name='New Relic'),
WebhookIntegration(
'opbeat',
['monitoring'],
display_name='Opbeat',
stream_name='opbeat',
function='zerver.webhooks.opbeat.view.api_opbeat_webhook'
),
WebhookIntegration('opsgenie', ['meta-integration', 'monitoring'], display_name='OpsGenie'),
WebhookIntegration('pagerduty', ['monitoring'], display_name='PagerDuty'),
WebhookIntegration('papertrail', ['monitoring']),
WebhookIntegration('pingdom', ['monitoring']),
WebhookIntegration('pivotal', ['project-management'], display_name='Pivotal Tracker'),
WebhookIntegration('raygun', ['monitoring'], display_name="Raygun"),
WebhookIntegration('reviewboard', ['version-control'], display_name="ReviewBoard"),
WebhookIntegration('semaphore', ['continuous-integration', 'deployment'], stream_name='builds'),
WebhookIntegration('sentry', ['monitoring']),
WebhookIntegration('slack', ['communication']),
WebhookIntegration('solano', ['continuous-integration'], display_name='Solano Labs'),
WebhookIntegration('splunk', ['monitoring'], display_name='Splunk'),
WebhookIntegration('statuspage', ['customer-support'], display_name='Statuspage'),
WebhookIntegration('stripe', ['financial'], display_name='Stripe'),
WebhookIntegration('taiga', ['project-management']),
WebhookIntegration('teamcity', ['continuous-integration']),
WebhookIntegration('transifex', ['misc']),
WebhookIntegration('travis', ['continuous-integration'], display_name='Travis CI'),
WebhookIntegration('trello', ['project-management']),
WebhookIntegration('updown', ['monitoring']),
WebhookIntegration(
'yo',
['communication'],
function='zerver.webhooks.yo.view.api_yo_app_webhook',
display_name='Yo App'
),
WebhookIntegration('wordpress', ['marketing'], display_name='WordPress'),
WebhookIntegration('zapier', ['meta-integration']),
WebhookIntegration('zendesk', ['customer-support']),
WebhookIntegration('zabbix', ['monitoring'], display_name='Zabbix'),
WebhookIntegration('gci', ['misc'], display_name='Google Code-in',
stream_name='gci'),
] # type: List[WebhookIntegration]
INTEGRATIONS = {
'asana': Integration('asana', 'asana', ['project-management'], doc='zerver/integrations/asana.md'),
'capistrano': Integration(
'capistrano',
'capistrano',
['deployment'],
display_name='Capistrano',
doc='zerver/integrations/capistrano.md'
),
'codebase': Integration('codebase', 'codebase', ['version-control'],
doc='zerver/integrations/codebase.md'),
'discourse': Integration('discourse', 'discourse', ['communication'],
doc='zerver/integrations/discourse.md'),
'email': EmailIntegration('email', 'email', ['communication'],
doc='zerver/integrations/email.md'),
'errbot': Integration('errbot', 'errbot', ['meta-integration', 'bots'],
doc='zerver/integrations/errbot.md'),
'git': Integration('git', 'git', ['version-control'],
stream_name='commits', doc='zerver/integrations/git.md'),
'google-calendar': Integration(
'google-calendar',
'google-calendar',
['productivity'],
display_name='Google Calendar',
doc='zerver/integrations/google-calendar.md'
),
'hubot': Integration('hubot', 'hubot', ['meta-integration', 'bots'], doc='zerver/integrations/hubot.md'),
'irc': Integration('irc', 'irc', ['communication'], display_name='IRC',
doc='zerver/integrations/irc.md'),
'jenkins': Integration(
'jenkins',
'jenkins',
['continuous-integration'],
secondary_line_text='(or Hudson)',
doc='zerver/integrations/jenkins.md'
),
'jira-plugin': Integration(
'jira-plugin',
'jira-plugin',
['project-management'],
logo='static/images/integrations/logos/jira.svg',
secondary_line_text='(locally installed)',
display_name='JIRA',
doc='zerver/integrations/jira-plugin.md',
stream_name='jira',
legacy=True
),
'matrix': Integration('matrix', 'matrix', ['communication'],
doc='zerver/integrations/matrix.md'),
'mercurial': Integration(
'mercurial',
'mercurial',
['version-control'],
display_name='Mercurial (hg)',
doc='zerver/integrations/mercurial.md',
stream_name='commits',
),
'nagios': Integration('nagios', 'nagios', ['monitoring'], doc='zerver/integrations/nagios.md'),
'openshift': Integration(
'openshift',
'openshift',
['deployment'],
display_name='OpenShift',
doc='zerver/integrations/openshift.md',
stream_name='deployments',
),
'perforce': Integration('perforce', 'perforce', ['version-control'],
doc='zerver/integrations/perforce.md'),
'phabricator': Integration('phabricator', 'phabricator', ['version-control'],
doc='zerver/integrations/phabricator.md'),
'puppet': Integration('puppet', 'puppet', ['deployment'], doc='zerver/integrations/puppet.md'),
'redmine': Integration('redmine', 'redmine', ['project-management'],
doc='zerver/integrations/redmine.md'),
'rss': Integration('rss', 'rss', ['communication'],
display_name='RSS', doc='zerver/integrations/rss.md'),
'svn': Integration('svn', 'svn', ['version-control'], doc='zerver/integrations/svn.md'),
'trac': Integration('trac', 'trac', ['project-management'], doc='zerver/integrations/trac.md'),
'trello-plugin': Integration(
'trello-plugin',
'trello-plugin',
['project-management'],
logo='static/images/integrations/logos/trello.svg',
secondary_line_text='(legacy)',
display_name='Trello',
doc='zerver/integrations/trello-plugin.md',
stream_name='trello',
legacy=True
),
'twitter': Integration('twitter', 'twitter', ['customer-support', 'marketing'],
doc='zerver/integrations/twitter.md'),
} # type: Dict[str, Integration]
BOT_INTEGRATIONS = [
BotIntegration('github_detail', ['version-control', 'bots'],
display_name='GitHub Detail'),
BotIntegration('xkcd', ['bots', 'misc'], display_name='xkcd'),
] # type: List[BotIntegration]
HUBOT_INTEGRATIONS = [
HubotIntegration('assembla', ['version-control', 'project-management'],
display_name='Assembla', logo_alt='Assembla'),
HubotIntegration('bonusly', ['hr']),
HubotIntegration('chartbeat', ['marketing'], display_name='Chartbeat'),
HubotIntegration('darksky', ['misc'], display_name='Dark Sky',
logo_alt='Dark Sky logo'),
HubotIntegration('google-hangouts', ['communication'], display_name='Google Hangouts',
logo_alt='Google Hangouts logo'),
HubotIntegration('instagram', ['misc'], display_name='Instagram'),
HubotIntegration('mailchimp', ['communication', 'marketing'],
display_name='MailChimp'),
HubotIntegration('google-translate', ['misc'],
display_name="Google Translate", logo_alt='Google Translate logo'),
HubotIntegration('youtube', ['misc'], display_name='YouTube'),
] # type: List[HubotIntegration]
for hubot_integration in HUBOT_INTEGRATIONS:
INTEGRATIONS[hubot_integration.name] = hubot_integration
for webhook_integration in WEBHOOK_INTEGRATIONS:
INTEGRATIONS[webhook_integration.name] = webhook_integration
for bot_integration in BOT_INTEGRATIONS:
INTEGRATIONS[bot_integration.name] = bot_integration
| [
"str",
"str",
"List[str]",
"str",
"List[str]",
"str",
"List[str]",
"str",
"List[str]",
"str",
"List[str]",
"str",
"Any",
"Any"
] | [
2015,
2033,
2050,
4206,
4223,
5626,
5643,
7076,
7093,
8012,
8029,
9154,
9166,
9181
] | [
2018,
2036,
2059,
4209,
4232,
5629,
5652,
7079,
7102,
8015,
8038,
9157,
9169,
9184
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/json_encoder_for_html.py | import json
from typing import Any, Dict, Iterator, Optional
# Taken from
# https://github.com/simplejson/simplejson/blob/8edc82afcf6f7512b05fba32baa536fe756bd273/simplejson/encoder.py#L378-L402
# License: MIT
class JSONEncoderForHTML(json.JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
within <script> tags.
"""
def encode(self, o: Any) -> str:
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
return ''.join(chunks)
def iterencode(self, o: Any, _one_shot: bool=False) -> Iterator[str]:
chunks = super().iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
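# Illustrative usage sketch (not part of the original module): shows how the
# encoder escapes &, < and > so the output can be inlined in a <script> tag.
if __name__ == '__main__':  # pragma: no cover -- demo only
    encoded = JSONEncoderForHTML().encode({'msg': '<b>&</b>'})
    print(encoded)  # {"msg": "\u003cb\u003e\u0026\u003c/b\u003e"}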
| [
"Any",
"Any"
] | [
580,
815
] | [
583,
818
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/logging_util.py | # System documented in https://zulip.readthedocs.io/en/latest/subsystems/logging.html
from django.utils.timezone import now as timezone_now
from django.utils.timezone import utc as timezone_utc
import hashlib
import logging
import re
import traceback
from typing import Optional
from datetime import datetime, timedelta
from django.conf import settings
from logging import Logger
# Adapted from http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)
class _RateLimitFilter:
last_error = datetime.min.replace(tzinfo=timezone_utc)
def filter(self, record: logging.LogRecord) -> bool:
from django.conf import settings
from django.core.cache import cache
# Track duplicate errors
duplicate = False
rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
600) # seconds
if rate > 0:
# Test if the cache works
try:
cache.set('RLF_TEST_KEY', 1, 1)
use_cache = cache.get('RLF_TEST_KEY') == 1
except Exception:
use_cache = False
if use_cache:
if record.exc_info is not None:
tb = '\n'.join(traceback.format_exception(*record.exc_info))
else:
tb = str(record)
key = self.__class__.__name__.upper() + hashlib.sha1(tb.encode()).hexdigest()
duplicate = cache.get(key) == 1
if not duplicate:
cache.set(key, 1, rate)
else:
min_date = timezone_now() - timedelta(seconds=rate)
duplicate = (self.last_error >= min_date)
if not duplicate:
self.last_error = timezone_now()
return not duplicate
class ZulipLimiter(_RateLimitFilter):
pass
class EmailLimiter(_RateLimitFilter):
pass
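# Illustrative wiring sketch (assumed configuration, not from this module):
# in a Django LOGGING dict these filters are referenced by dotted path, e.g.
#
#     LOGGING = {
#         'version': 1,
#         'filters': {
#             'ZulipLimiter': {'()': 'zerver.lib.logging_util.ZulipLimiter'},
#         },
#         'handlers': {
#             'console': {'class': 'logging.StreamHandler',
#                         'filters': ['ZulipLimiter']},
#         },
#     }
#
# With a ZULIPLIMITER_LIMIT of N seconds, identical tracebacks within the
# window are dropped because filter() returns False for duplicates.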
class ReturnTrue(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
return True
class ReturnEnabled(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
return settings.LOGGING_ENABLED
class RequireReallyDeployed(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
from django.conf import settings
return settings.PRODUCTION
def skip_200_and_304(record: logging.LogRecord) -> bool:
# Apparently, `status_code` is added by Django and is not an actual
# attribute of LogRecord; as a result, mypy throws an error if we
# access the `status_code` attribute directly.
if getattr(record, 'status_code') in [200, 304]:
return False
return True
IGNORABLE_404_URLS = [
re.compile(r'^/apple-touch-icon.*\.png$'),
re.compile(r'^/favicon\.ico$'),
re.compile(r'^/robots\.txt$'),
re.compile(r'^/django_static_404.html$'),
re.compile(r'^/wp-login.php$'),
]
def skip_boring_404s(record: logging.LogRecord) -> bool:
"""Prevents Django's 'Not Found' warnings from being logged for common
404 errors that don't reflect a problem in Zulip. The overall
result is to keep the Zulip error logs cleaner than they would
otherwise be.
Assumes that its input is a django.request log record.
"""
# Apparently, `status_code` is added by Django and is not an actual
# attribute of LogRecord; as a result, mypy throws an error if we
# access the `status_code` attribute directly.
if getattr(record, 'status_code') != 404:
return True
# We're only interested in filtering the "Not Found" errors.
if getattr(record, 'msg') != 'Not Found: %s':
return True
path = getattr(record, 'args', [''])[0]
for pattern in IGNORABLE_404_URLS:
if re.match(pattern, path):
return False
return True
def skip_site_packages_logs(record: logging.LogRecord) -> bool:
# This skips the log records that are generated from libraries
# installed in site packages.
# Workaround for https://code.djangoproject.com/ticket/26886
if 'site-packages' in record.pathname:
return False
return True
def find_log_caller_module(record: logging.LogRecord) -> Optional[str]:
'''Find the module name corresponding to where this record was logged.
Sadly `record.module` is just the innermost component of the full
module name, so we have to go reconstruct this ourselves.
'''
# Repeat a search similar to that in logging.Logger.findCaller.
# The logging call should still be on the stack somewhere; search until
# we find something in the same source file, and that should give the
# right module name.
f = logging.currentframe() # type: ignore # Not in typeshed, and arguably shouldn't be
while f is not None:
if f.f_code.co_filename == record.pathname:
return f.f_globals.get('__name__')
f = f.f_back
return None
logger_nicknames = {
'root': '', # This one is more like undoing a nickname.
'zulip.requests': 'zr', # Super common.
}
def find_log_origin(record: logging.LogRecord) -> str:
logger_name = logger_nicknames.get(record.name, record.name)
if settings.LOGGING_SHOW_MODULE:
module_name = find_log_caller_module(record)
if module_name == logger_name or module_name == record.name:
# Abbreviate a bit.
return logger_name
else:
return '{}/{}'.format(logger_name, module_name or '?')
else:
return logger_name
log_level_abbrevs = {
'DEBUG': 'DEBG',
'INFO': 'INFO',
'WARNING': 'WARN',
'ERROR': 'ERR',
'CRITICAL': 'CRIT',
}
def abbrev_log_levelname(levelname: str) -> str:
# It's unlikely someone will set a custom log level with a custom name,
# but it's an option, so we shouldn't crash if someone does.
return log_level_abbrevs.get(levelname, levelname[:4])
class ZulipFormatter(logging.Formatter):
# Used in the base implementation. Default uses `,`.
default_msec_format = '%s.%03d'
def __init__(self) -> None:
super().__init__(fmt=self._compute_fmt())
def _compute_fmt(self) -> str:
pieces = ['%(asctime)s', '%(zulip_level_abbrev)-4s']
if settings.LOGGING_SHOW_PID:
pieces.append('pid:%(process)d')
pieces.extend(['[%(zulip_origin)s]', '%(message)s'])
return ' '.join(pieces)
def format(self, record: logging.LogRecord) -> str:
if not getattr(record, 'zulip_decorated', False):
# The `setattr` calls put this logic explicitly outside the bounds of the
# type system; otherwise mypy would complain LogRecord lacks these attributes.
setattr(record, 'zulip_level_abbrev', abbrev_log_levelname(record.levelname))
setattr(record, 'zulip_origin', find_log_origin(record))
setattr(record, 'zulip_decorated', True)
return super().format(record)
def log_to_file(logger: Logger,
filename: str,
log_format: str="%(asctime)s %(levelname)-8s %(message)s",
) -> None:
"""Note: `filename` should be declared in zproject/settings.py in ZULIP_PATHS."""
formatter = logging.Formatter(log_format)
handler = logging.FileHandler(filename)
handler.setFormatter(formatter)
logger.addHandler(handler)
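# Minimal usage sketch (illustrative; the path below is hypothetical -- real
# callers pass a filename declared in ZULIP_PATHS in zproject/settings.py):
#
#     queue_logger = logging.getLogger('zulip.queue')
#     log_to_file(queue_logger, '/var/log/zulip/queue.log')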
| [
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"logging.LogRecord",
"str",
"logging.LogRecord",
"Logger",
"str"
] | [
579,
1973,
2088,
2231,
2365,
2933,
3849,
4159,
5068,
5682,
6417,
6954,
6988
] | [
596,
1990,
2105,
2248,
2382,
2950,
3866,
4176,
5085,
5685,
6434,
6960,
6991
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/management.py | # Library code for use in management commands
import sys
import time
from argparse import ArgumentParser
from django.conf import settings
from django.core.exceptions import MultipleObjectsReturned
from django.core.management.base import BaseCommand, CommandError
from typing import Any, Dict, Optional, List
from zerver.models import Realm, UserProfile, Client, get_client
def is_integer_string(val: str) -> bool:
try:
int(val)
return True
except ValueError:
return False
def check_config() -> None:
for (setting_name, default) in settings.REQUIRED_SETTINGS:
        # If a required setting is the same as its default OR is not found in
        # settings, throw an error telling the admin to set it in the config.
try:
if settings.__getattr__(setting_name) != default:
continue
except AttributeError:
pass
raise CommandError("Error: You must set %s in /etc/zulip/settings.py." % (setting_name,))
def sleep_forever() -> None:
while True: # nocoverage
time.sleep(10**9)
class ZulipBaseCommand(BaseCommand):
def add_realm_args(self, parser: ArgumentParser, required: bool=False,
help: Optional[str]=None) -> None:
if help is None:
help = """The numeric or string ID (subdomain) of the Zulip organization to modify.
You can use the command list_realms to find the IDs of the realms on this server."""
parser.add_argument(
'-r', '--realm',
dest='realm_id',
required=required,
type=str,
help=help)
def add_user_list_args(self, parser: ArgumentParser,
help: str='A comma-separated list of email addresses.',
all_users_help: str="All users in realm.") -> None:
parser.add_argument(
'-u', '--users',
dest='users',
type=str,
help=help)
parser.add_argument(
'-a', '--all-users',
dest='all_users',
action="store_true",
default=False,
help=all_users_help)
def get_realm(self, options: Dict[str, Any]) -> Optional[Realm]:
val = options["realm_id"]
if val is None:
return None
# If they specified a realm argument, we need to ensure the
# realm exists. We allow two formats: the numeric ID for the
# realm and the string ID of the realm.
try:
if is_integer_string(val):
return Realm.objects.get(id=val)
return Realm.objects.get(string_id=val)
except Realm.DoesNotExist:
raise CommandError("There is no realm with id '%s'. Aborting." %
(options["realm_id"],))
def get_users(self, options: Dict[str, Any], realm: Optional[Realm]) -> List[UserProfile]:
if "all_users" in options:
all_users = options["all_users"]
if not options["users"] and not all_users:
raise CommandError("You have to pass either -u/--users or -a/--all-users.")
if options["users"] and all_users:
raise CommandError("You can't use both -u/--users and -a/--all-users.")
if all_users and realm is None:
raise CommandError("The --all-users option requires a realm; please pass --realm.")
if all_users:
return UserProfile.objects.filter(realm=realm)
if options["users"] is None:
return []
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(self.get_user(email, realm))
return user_profiles
def get_user(self, email: str, realm: Optional[Realm]) -> UserProfile:
# If a realm is specified, try to find the user there, and
# throw an error if they don't exist.
if realm is not None:
try:
return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
except UserProfile.DoesNotExist:
raise CommandError("The realm '%s' does not contain a user with email '%s'" % (realm, email))
# Realm is None in the remaining code path. Here, we
# optimistically try to see if there is exactly one user with
# that email; if so, we'll return it.
try:
return UserProfile.objects.select_related().get(email__iexact=email.strip())
except MultipleObjectsReturned:
raise CommandError("This Zulip server contains multiple users with that email " +
"(in different realms); please pass `--realm` "
"to specify which one to modify.")
except UserProfile.DoesNotExist:
raise CommandError("This Zulip server does not contain a user with email '%s'" % (email,))
def get_client(self) -> Client:
"""Returns a Zulip Client object to be used for things done in management commands"""
return get_client("ZulipServer")
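# Illustrative subclass sketch (hypothetical command, not part of this
# module), showing how the helpers above are typically combined:
#
#     class Command(ZulipBaseCommand):
#         def add_arguments(self, parser: ArgumentParser) -> None:
#             self.add_realm_args(parser)
#             self.add_user_list_args(parser)
#
#         def handle(self, *args: Any, **options: Any) -> None:
#             realm = self.get_realm(options)
#             for user in self.get_users(options, realm):
#                 print(user.email)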
| [
"str",
"ArgumentParser",
"ArgumentParser",
"Dict[str, Any]",
"Dict[str, Any]",
"Optional[Realm]",
"str",
"Optional[Realm]"
] | [
404,
1146,
1649,
2176,
2835,
2858,
3813,
3825
] | [
407,
1160,
1663,
2190,
2849,
2873,
3816,
3840
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/mdiff.py | import os
import subprocess
import logging
import difflib
class DiffException(Exception):
pass
def diff_strings(output: str, expected_output: str) -> str:
mdiff_path = "frontend_tests/zjsunit/mdiff.js"
if not os.path.isfile(mdiff_path): # nocoverage
msg = "Cannot find mdiff for markdown diff rendering"
logging.error(msg)
raise DiffException(msg)
command = ['node', mdiff_path, output, expected_output]
diff = subprocess.check_output(command).decode('utf-8')
return diff
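# Usage sketch (illustrative): both arguments are rendered-markdown strings,
# and the return value is mdiff.js's colorized diff:
#
#     diff = diff_strings('<p>foo</p>', '<p>bar</p>')
#
# This assumes a `node` binary on $PATH and a checkout that contains
# frontend_tests/zjsunit/mdiff.js, per the guard above.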
| [
"str",
"str"
] | [
126,
148
] | [
129,
151
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/mention.py |
from typing import Optional, Set
import re
# Match a multi-word string between @** ** or any one-word
# sequence after @
find_mentions = r'(?<![^\s\'\"\(,:<])@(\*\*[^\*]+\*\*|all|everyone|stream)'
user_group_mentions = r'(?<![^\s\'\"\(,:<])@(\*[^\*]+\*)'
wildcards = ['all', 'everyone', 'stream']
def user_mention_matches_wildcard(mention: str) -> bool:
return mention in wildcards
def extract_mention_text(s: str) -> Optional[str]:
if s.startswith("**") and s.endswith("**"):
text = s[2:-2]
if text in wildcards:
return None
return text
# We don't care about @all, @everyone or @stream
return None
def possible_mentions(content: str) -> Set[str]:
matches = re.findall(find_mentions, content)
    # Mention texts can be either plain names or an extended name|id syntax.
texts_with_none = (extract_mention_text(match) for match in matches)
texts = {text for text in texts_with_none if text}
return texts
def extract_user_group(matched_text: str) -> str:
return matched_text[1:-1]
def possible_user_group_mentions(content: str) -> Set[str]:
matches = re.findall(user_group_mentions, content)
return {extract_user_group(match) for match in matches}
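# Illustrative demo (not part of the original module) of how the helpers
# above behave on a fabricated message body:
if __name__ == '__main__':  # pragma: no cover -- demo only
    content = 'hi @**Cordelia Lear**, ask @*backend* about @**all**'
    print(possible_mentions(content))             # {'Cordelia Lear'}
    print(possible_user_group_mentions(content))  # {'backend'}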
| [
"str",
"str",
"str",
"str",
"str"
] | [
350,
425,
695,
1017,
1103
] | [
353,
428,
698,
1020,
1106
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/message.py |
import datetime
import ujson
import zlib
from django.utils.translation import ugettext as _
from django.utils.timezone import now as timezone_now
from django.db.models import Sum
from analytics.lib.counts import COUNT_STATS, RealmCount
from zerver.lib.avatar import get_avatar_field
import zerver.lib.bugdown as bugdown
from zerver.lib.cache import (
cache_with_key,
generic_bulk_cached_fetch,
to_dict_cache_key,
to_dict_cache_key_id,
realm_first_visible_message_id_cache_key,
cache_get, cache_set,
)
from zerver.lib.request import JsonableError
from zerver.lib.stream_subscription import (
get_stream_subscriptions_for_user,
)
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.topic import (
DB_TOPIC_NAME,
MESSAGE__TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
)
from zerver.lib.topic_mutes import (
build_topic_mute_checker,
topic_is_muted,
)
from zerver.models import (
get_display_recipient_by_id,
get_user_profile_by_id,
query_for_ids,
Message,
Realm,
Recipient,
Stream,
SubMessage,
Subscription,
UserProfile,
UserMessage,
Reaction,
get_usermessage_by_message_id,
)
from typing import Any, Dict, List, Optional, Set, Tuple, Union, Sequence
from mypy_extensions import TypedDict
RealmAlertWords = Dict[int, List[str]]
RawUnreadMessagesResult = TypedDict('RawUnreadMessagesResult', {
'pm_dict': Dict[int, Any],
'stream_dict': Dict[int, Any],
'huddle_dict': Dict[int, Any],
'mentions': Set[int],
'muted_stream_ids': List[int],
'unmuted_stream_msgs': Set[int],
})
UnreadMessagesResult = TypedDict('UnreadMessagesResult', {
'pms': List[Dict[str, Any]],
'streams': List[Dict[str, Any]],
'huddles': List[Dict[str, Any]],
'mentions': List[int],
'count': int,
})
# We won't try to fetch more unread message IDs from the database than
# this limit. The limit is super high, in large part because it means
# client-side code mostly doesn't need to think about the case that a
# user has more older unread messages that were cut off.
MAX_UNREAD_MESSAGES = 50000
def messages_for_ids(message_ids: List[int],
user_message_flags: Dict[int, List[str]],
search_fields: Dict[int, Dict[str, str]],
apply_markdown: bool,
client_gravatar: bool,
allow_edit_history: bool) -> List[Dict[str, Any]]:
cache_transformer = MessageDict.build_dict_from_raw_db_row
id_fetcher = lambda row: row['id']
message_dicts = generic_bulk_cached_fetch(to_dict_cache_key_id,
MessageDict.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = [] # type: List[Dict[str, Any]]
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
if message_id in search_fields:
msg_dict.update(search_fields[message_id])
# Make sure that we never send message edit history to clients
# in realms with allow_edit_history disabled.
if "edit_history" in msg_dict and not allow_edit_history:
del msg_dict["edit_history"]
message_list.append(msg_dict)
MessageDict.post_process_dicts(message_list, apply_markdown, client_gravatar)
return message_list
def sew_messages_and_reactions(messages: List[Dict[str, Any]],
reactions: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Given a iterable of messages and reactions stitch reactions
into messages.
"""
# Add all messages with empty reaction item
for message in messages:
message['reactions'] = []
# Convert list of messages into dictionary to make reaction stitching easy
converted_messages = {message['id']: message for message in messages}
for reaction in reactions:
converted_messages[reaction['message_id']]['reactions'].append(
reaction)
return list(converted_messages.values())
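# Worked example for the function above (fabricated rows): given
#     messages  = [{'id': 1}, {'id': 2}]
#     reactions = [{'message_id': 2, 'emoji_name': 'smile'}]
# it returns, preserving the input message order:
#     [{'id': 1, 'reactions': []},
#      {'id': 2, 'reactions': [{'message_id': 2, 'emoji_name': 'smile'}]}]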
def sew_messages_and_submessages(messages: List[Dict[str, Any]],
submessages: List[Dict[str, Any]]) -> None:
# This is super similar to sew_messages_and_reactions.
for message in messages:
message['submessages'] = []
message_dict = {message['id']: message for message in messages}
for submessage in submessages:
message_id = submessage['message_id']
if message_id in message_dict:
message = message_dict[message_id]
message['submessages'].append(submessage)
def extract_message_dict(message_bytes: bytes) -> Dict[str, Any]:
return ujson.loads(zlib.decompress(message_bytes).decode("utf-8"))
def stringify_message_dict(message_dict: Dict[str, Any]) -> bytes:
return zlib.compress(ujson.dumps(message_dict).encode())
@cache_with_key(to_dict_cache_key, timeout=3600*24)
def message_to_dict_json(message: Message) -> bytes:
return MessageDict.to_dict_uncached(message)
def save_message_rendered_content(message: Message, content: str) -> str:
rendered_content = render_markdown(message, content, realm=message.get_realm())
message.rendered_content = rendered_content
message.rendered_content_version = bugdown.version
message.save_rendered_content()
return rendered_content
class MessageDict:
@staticmethod
def wide_dict(message: Message) -> Dict[str, Any]:
'''
The next two lines get the cachable field related
to our message object, with the side effect of
populating the cache.
'''
json = message_to_dict_json(message)
obj = extract_message_dict(json)
'''
The steps below are similar to what we do in
post_process_dicts(), except we don't call finalize_payload(),
since that step happens later in the queue
processor.
'''
MessageDict.bulk_hydrate_sender_info([obj])
MessageDict.hydrate_recipient_info(obj)
return obj
@staticmethod
def post_process_dicts(objs: List[Dict[str, Any]], apply_markdown: bool, client_gravatar: bool) -> None:
MessageDict.bulk_hydrate_sender_info(objs)
for obj in objs:
MessageDict.hydrate_recipient_info(obj)
MessageDict.finalize_payload(obj, apply_markdown, client_gravatar)
@staticmethod
def finalize_payload(obj: Dict[str, Any],
apply_markdown: bool,
client_gravatar: bool) -> None:
MessageDict.set_sender_avatar(obj, client_gravatar)
if apply_markdown:
obj['content_type'] = 'text/html'
obj['content'] = obj['rendered_content']
else:
obj['content_type'] = 'text/x-markdown'
del obj['rendered_content']
del obj['sender_realm_id']
del obj['sender_avatar_source']
del obj['sender_avatar_version']
del obj['raw_display_recipient']
del obj['recipient_type']
del obj['recipient_type_id']
del obj['sender_is_mirror_dummy']
@staticmethod
def to_dict_uncached(message: Message) -> bytes:
dct = MessageDict.to_dict_uncached_helper(message)
return stringify_message_dict(dct)
@staticmethod
def to_dict_uncached_helper(message: Message) -> Dict[str, Any]:
return MessageDict.build_message_dict(
message = message,
message_id = message.id,
last_edit_time = message.last_edit_time,
edit_history = message.edit_history,
content = message.content,
topic_name = message.topic_name(),
pub_date = message.pub_date,
rendered_content = message.rendered_content,
rendered_content_version = message.rendered_content_version,
sender_id = message.sender.id,
sender_realm_id = message.sender.realm_id,
sending_client_name = message.sending_client.name,
recipient_id = message.recipient.id,
recipient_type = message.recipient.type,
recipient_type_id = message.recipient.type_id,
reactions = Reaction.get_raw_db_rows([message.id]),
submessages = SubMessage.get_raw_db_rows([message.id]),
)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
# This is a special purpose function optimized for
# callers like get_messages_backend().
fields = [
'id',
DB_TOPIC_NAME,
'pub_date',
'last_edit_time',
'edit_history',
'content',
'rendered_content',
'rendered_content_version',
'recipient_id',
'recipient__type',
'recipient__type_id',
'sender_id',
'sending_client__name',
'sender__realm_id',
]
messages = Message.objects.filter(id__in=needed_ids).values(*fields)
submessages = SubMessage.get_raw_db_rows(needed_ids)
sew_messages_and_submessages(messages, submessages)
reactions = Reaction.get_raw_db_rows(needed_ids)
return sew_messages_and_reactions(messages, reactions)
@staticmethod
def build_dict_from_raw_db_row(row: Dict[str, Any]) -> Dict[str, Any]:
'''
row is a row from a .values() call, and it needs to have
all the relevant fields populated
'''
return MessageDict.build_message_dict(
message = None,
message_id = row['id'],
last_edit_time = row['last_edit_time'],
edit_history = row['edit_history'],
content = row['content'],
topic_name = row[DB_TOPIC_NAME],
pub_date = row['pub_date'],
rendered_content = row['rendered_content'],
rendered_content_version = row['rendered_content_version'],
sender_id = row['sender_id'],
sender_realm_id = row['sender__realm_id'],
sending_client_name = row['sending_client__name'],
recipient_id = row['recipient_id'],
recipient_type = row['recipient__type'],
recipient_type_id = row['recipient__type_id'],
reactions=row['reactions'],
submessages=row['submessages'],
)
@staticmethod
def build_message_dict(
message: Optional[Message],
message_id: int,
last_edit_time: Optional[datetime.datetime],
edit_history: Optional[str],
content: str,
topic_name: str,
pub_date: datetime.datetime,
rendered_content: Optional[str],
rendered_content_version: Optional[int],
sender_id: int,
sender_realm_id: int,
sending_client_name: str,
recipient_id: int,
recipient_type: int,
recipient_type_id: int,
reactions: List[Dict[str, Any]],
submessages: List[Dict[str, Any]]
) -> Dict[str, Any]:
obj = dict(
id = message_id,
sender_id = sender_id,
content = content,
recipient_type_id = recipient_type_id,
recipient_type = recipient_type,
recipient_id = recipient_id,
timestamp = datetime_to_timestamp(pub_date),
client = sending_client_name)
obj[TOPIC_NAME] = topic_name
obj['sender_realm_id'] = sender_realm_id
obj['raw_display_recipient'] = get_display_recipient_by_id(
recipient_id,
recipient_type,
recipient_type_id
)
obj[TOPIC_LINKS] = bugdown.topic_links(sender_realm_id, topic_name)
if last_edit_time is not None:
obj['last_edit_timestamp'] = datetime_to_timestamp(last_edit_time)
assert edit_history is not None
obj['edit_history'] = ujson.loads(edit_history)
if Message.need_to_render_content(rendered_content, rendered_content_version, bugdown.version):
if message is None:
# We really shouldn't be rendering objects in this method, but there is
# a scenario where we upgrade the version of bugdown and fail to run
# management commands to re-render historical messages, and then we
# need to have side effects. This method is optimized to not need full
# blown ORM objects, but the bugdown renderer is unfortunately highly
# coupled to Message, and we also need to persist the new rendered content.
# If we don't have a message object passed in, we get one here. The cost
# of going to the DB here should be overshadowed by the cost of rendering
# and updating the row.
# TODO: see #1379 to eliminate bugdown dependencies
message = Message.objects.select_related().get(id=message_id)
assert message is not None # Hint for mypy.
# It's unfortunate that we need to have side effects on the message
# in some cases.
rendered_content = save_message_rendered_content(message, content)
if rendered_content is not None:
obj['rendered_content'] = rendered_content
else:
obj['rendered_content'] = ('<p>[Zulip note: Sorry, we could not ' +
'understand the formatting of your message]</p>')
if rendered_content is not None:
obj['is_me_message'] = Message.is_status_message(content, rendered_content)
else:
obj['is_me_message'] = False
obj['reactions'] = [ReactionDict.build_dict_from_raw_db_row(reaction)
for reaction in reactions]
obj['submessages'] = submessages
return obj
@staticmethod
def bulk_hydrate_sender_info(objs: List[Dict[str, Any]]) -> None:
sender_ids = list({
obj['sender_id']
for obj in objs
})
if not sender_ids:
return
query = UserProfile.objects.values(
'id',
'full_name',
'short_name',
'email',
'realm__string_id',
'avatar_source',
'avatar_version',
'is_mirror_dummy',
)
rows = query_for_ids(query, sender_ids, 'zerver_userprofile.id')
sender_dict = {
row['id']: row
for row in rows
}
for obj in objs:
sender_id = obj['sender_id']
user_row = sender_dict[sender_id]
obj['sender_full_name'] = user_row['full_name']
obj['sender_short_name'] = user_row['short_name']
obj['sender_email'] = user_row['email']
obj['sender_realm_str'] = user_row['realm__string_id']
obj['sender_avatar_source'] = user_row['avatar_source']
obj['sender_avatar_version'] = user_row['avatar_version']
obj['sender_is_mirror_dummy'] = user_row['is_mirror_dummy']
@staticmethod
def hydrate_recipient_info(obj: Dict[str, Any]) -> None:
'''
        This method hydrates recipient info with things
        like full names and emails of senders. Eventually
        our clients should be able to hydrate these fields
        themselves with info they already have on users.
'''
display_recipient = obj['raw_display_recipient']
recipient_type = obj['recipient_type']
recipient_type_id = obj['recipient_type_id']
sender_is_mirror_dummy = obj['sender_is_mirror_dummy']
sender_email = obj['sender_email']
sender_full_name = obj['sender_full_name']
sender_short_name = obj['sender_short_name']
sender_id = obj['sender_id']
if recipient_type == Recipient.STREAM:
display_type = "stream"
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
assert not isinstance(display_recipient, str)
display_type = "private"
if len(display_recipient) == 1:
# add the sender in if this isn't a message between
# someone and themself, preserving ordering
recip = {'email': sender_email,
'full_name': sender_full_name,
'short_name': sender_short_name,
'id': sender_id,
'is_mirror_dummy': sender_is_mirror_dummy}
if recip['email'] < display_recipient[0]['email']:
display_recipient = [recip, display_recipient[0]]
elif recip['email'] > display_recipient[0]['email']:
display_recipient = [display_recipient[0], recip]
else:
raise AssertionError("Invalid recipient type %s" % (recipient_type,))
obj['display_recipient'] = display_recipient
obj['type'] = display_type
if obj['type'] == 'stream':
obj['stream_id'] = recipient_type_id
@staticmethod
def set_sender_avatar(obj: Dict[str, Any], client_gravatar: bool) -> None:
sender_id = obj['sender_id']
sender_realm_id = obj['sender_realm_id']
sender_email = obj['sender_email']
sender_avatar_source = obj['sender_avatar_source']
sender_avatar_version = obj['sender_avatar_version']
obj['avatar_url'] = get_avatar_field(
user_id=sender_id,
realm_id=sender_realm_id,
email=sender_email,
avatar_source=sender_avatar_source,
avatar_version=sender_avatar_version,
medium=False,
client_gravatar=client_gravatar,
)
class ReactionDict:
@staticmethod
def build_dict_from_raw_db_row(row: Dict[str, Any]) -> Dict[str, Any]:
return {'emoji_name': row['emoji_name'],
'emoji_code': row['emoji_code'],
'reaction_type': row['reaction_type'],
'user': {'email': row['user_profile__email'],
'id': row['user_profile__id'],
'full_name': row['user_profile__full_name']}}
def access_message(user_profile: UserProfile, message_id: int) -> Tuple[Message, Optional[UserMessage]]:
"""You can access a message by ID in our APIs that either:
(1) You received or have previously accessed via starring
(aka have a UserMessage row for).
(2) Was sent to a public stream in your realm.
We produce consistent, boring error messages to avoid leaking any
information from a security perspective.
"""
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Invalid message(s)"))
user_message = get_usermessage_by_message_id(user_profile, message_id)
if has_message_access(user_profile, message, user_message):
return (message, user_message)
raise JsonableError(_("Invalid message(s)"))
def has_message_access(user_profile: UserProfile, message: Message,
user_message: Optional[UserMessage]) -> bool:
if user_message is None:
if message.recipient.type != Recipient.STREAM:
# You can't access private messages you didn't receive
return False
stream = Stream.objects.get(id=message.recipient.type_id)
if stream.realm != user_profile.realm:
# You can't access public stream messages in other realms
return False
if not stream.is_history_public_to_subscribers():
# You can't access messages you didn't directly receive
# unless history is public to subscribers.
return False
if not stream.is_public():
# This stream is an invite-only stream where message
# history is available to subscribers. So we check if
# you're subscribed.
if not Subscription.objects.filter(user_profile=user_profile, active=True,
recipient=message.recipient).exists():
return False
# You are subscribed, so let this fall through to the public stream case.
elif user_profile.is_guest:
# Guest users don't get automatic access to public stream messages
if not Subscription.objects.filter(user_profile=user_profile, active=True,
recipient=message.recipient).exists():
return False
else:
# Otherwise, the message was sent to a public stream in
# your realm, so return the message, user_message pair
pass
return True
def bulk_access_messages(user_profile: UserProfile, messages: Sequence[Message]) -> List[Message]:
filtered_messages = []
for message in messages:
user_message = get_usermessage_by_message_id(user_profile, message.id)
if has_message_access(user_profile, message, user_message):
filtered_messages.append(message)
return filtered_messages
def render_markdown(message: Message,
content: str,
realm: Optional[Realm]=None,
realm_alert_words: Optional[RealmAlertWords]=None,
user_ids: Optional[Set[int]]=None,
mention_data: Optional[bugdown.MentionData]=None,
email_gateway: Optional[bool]=False) -> str:
'''
This is basically just a wrapper for do_render_markdown.
'''
if user_ids is None:
message_user_ids = set() # type: Set[int]
else:
message_user_ids = user_ids
if realm is None:
realm = message.get_realm()
if realm_alert_words is None:
realm_alert_words = dict()
sender = get_user_profile_by_id(message.sender_id)
sent_by_bot = sender.is_bot
translate_emoticons = sender.translate_emoticons
rendered_content = do_render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words=realm_alert_words,
message_user_ids=message_user_ids,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
mention_data=mention_data,
email_gateway=email_gateway,
)
return rendered_content
def do_render_markdown(message: Message,
content: str,
realm: Realm,
realm_alert_words: RealmAlertWords,
message_user_ids: Set[int],
sent_by_bot: bool,
translate_emoticons: bool,
mention_data: Optional[bugdown.MentionData]=None,
email_gateway: Optional[bool]=False) -> str:
"""Return HTML for given markdown. Bugdown may add properties to the
message object such as `mentions_user_ids`, `mentions_user_group_ids`, and
`mentions_wildcard`. These are only on this Django object and are not
saved in the database.
"""
message.mentions_wildcard = False
message.mentions_user_ids = set()
message.mentions_user_group_ids = set()
message.alert_words = set()
message.links_for_preview = set()
possible_words = set() # type: Set[str]
for user_id, words in realm_alert_words.items():
if user_id in message_user_ids:
possible_words.update(set(words))
# DO MAIN WORK HERE -- call bugdown to convert
rendered_content = bugdown.convert(
content,
message=message,
message_realm=realm,
possible_words=possible_words,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
mention_data=mention_data,
email_gateway=email_gateway
)
message.user_ids_with_alert_words = set()
for user_id, words in realm_alert_words.items():
if user_id in message_user_ids:
if set(words).intersection(message.alert_words):
message.user_ids_with_alert_words.add(user_id)
return rendered_content
def huddle_users(recipient_id: int) -> str:
display_recipient = get_display_recipient_by_id(recipient_id,
Recipient.HUDDLE,
None) # type: Union[str, List[Dict[str, Any]]]
# str is for streams.
assert not isinstance(display_recipient, str)
user_ids = [obj['id'] for obj in display_recipient] # type: List[int]
user_ids = sorted(user_ids)
return ','.join(str(uid) for uid in user_ids)
def aggregate_message_dict(input_dict: Dict[int, Dict[str, Any]],
lookup_fields: List[str],
collect_senders: bool) -> List[Dict[str, Any]]:
lookup_dict = dict() # type: Dict[Tuple[Any, ...], Dict[str, Any]]
'''
A concrete example might help explain the inputs here:
input_dict = {
1002: dict(stream_id=5, topic='foo', sender_id=40),
1003: dict(stream_id=5, topic='foo', sender_id=41),
1004: dict(stream_id=6, topic='baz', sender_id=99),
}
lookup_fields = ['stream_id', 'topic']
The first time through the loop:
attribute_dict = dict(stream_id=5, topic='foo', sender_id=40)
        lookup_key = (5, 'foo')
lookup_dict = {
(5, 'foo'): dict(stream_id=5, topic='foo',
unread_message_ids=[1002, 1003],
sender_ids=[40, 41],
),
...
}
result = [
dict(stream_id=5, topic='foo',
unread_message_ids=[1002, 1003],
sender_ids=[40, 41],
),
...
]
'''
for message_id, attribute_dict in input_dict.items():
lookup_key = tuple([attribute_dict[f] for f in lookup_fields])
if lookup_key not in lookup_dict:
obj = {}
for f in lookup_fields:
obj[f] = attribute_dict[f]
obj['unread_message_ids'] = []
if collect_senders:
obj['sender_ids'] = set()
lookup_dict[lookup_key] = obj
bucket = lookup_dict[lookup_key]
bucket['unread_message_ids'].append(message_id)
if collect_senders:
bucket['sender_ids'].add(attribute_dict['sender_id'])
for dct in lookup_dict.values():
dct['unread_message_ids'].sort()
if collect_senders:
dct['sender_ids'] = sorted(list(dct['sender_ids']))
sorted_keys = sorted(lookup_dict.keys())
return [lookup_dict[k] for k in sorted_keys]
def get_inactive_recipient_ids(user_profile: UserProfile) -> List[int]:
rows = get_stream_subscriptions_for_user(user_profile).filter(
active=False,
).values(
'recipient_id'
)
inactive_recipient_ids = [
row['recipient_id']
for row in rows]
return inactive_recipient_ids
def get_muted_stream_ids(user_profile: UserProfile) -> List[int]:
rows = get_stream_subscriptions_for_user(user_profile).filter(
active=True,
in_home_view=False,
).values(
'recipient__type_id'
)
muted_stream_ids = [
row['recipient__type_id']
for row in rows]
return muted_stream_ids
def get_starred_message_ids(user_profile: UserProfile) -> List[int]:
return list(UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_starred()]
).order_by(
'message_id'
).values_list('message_id', flat=True)[0:10000])
def get_raw_unread_data(user_profile: UserProfile) -> RawUnreadMessagesResult:
excluded_recipient_ids = get_inactive_recipient_ids(user_profile)
user_msgs = UserMessage.objects.filter(
user_profile=user_profile
).exclude(
message__recipient_id__in=excluded_recipient_ids
).extra(
where=[UserMessage.where_unread()]
).values(
'message_id',
'message__sender_id',
MESSAGE__TOPIC,
'message__recipient_id',
'message__recipient__type',
'message__recipient__type_id',
'flags',
).order_by("-message_id")
# Limit unread messages for performance reasons.
user_msgs = list(user_msgs[:MAX_UNREAD_MESSAGES])
rows = list(reversed(user_msgs))
muted_stream_ids = get_muted_stream_ids(user_profile)
topic_mute_checker = build_topic_mute_checker(user_profile)
def is_row_muted(stream_id: int, recipient_id: int, topic: str) -> bool:
if stream_id in muted_stream_ids:
return True
if topic_mute_checker(recipient_id, topic):
return True
return False
huddle_cache = {} # type: Dict[int, str]
def get_huddle_users(recipient_id: int) -> str:
if recipient_id in huddle_cache:
return huddle_cache[recipient_id]
user_ids_string = huddle_users(recipient_id)
huddle_cache[recipient_id] = user_ids_string
return user_ids_string
pm_dict = {}
stream_dict = {}
unmuted_stream_msgs = set()
huddle_dict = {}
mentions = set()
for row in rows:
message_id = row['message_id']
msg_type = row['message__recipient__type']
recipient_id = row['message__recipient_id']
sender_id = row['message__sender_id']
if msg_type == Recipient.STREAM:
stream_id = row['message__recipient__type_id']
topic = row[MESSAGE__TOPIC]
stream_dict[message_id] = dict(
stream_id=stream_id,
topic=topic,
sender_id=sender_id,
)
if not is_row_muted(stream_id, recipient_id, topic):
unmuted_stream_msgs.add(message_id)
elif msg_type == Recipient.PERSONAL:
pm_dict[message_id] = dict(
sender_id=sender_id,
)
elif msg_type == Recipient.HUDDLE:
user_ids_string = get_huddle_users(recipient_id)
huddle_dict[message_id] = dict(
user_ids_string=user_ids_string,
)
is_mentioned = (row['flags'] & UserMessage.flags.mentioned) != 0
if is_mentioned:
mentions.add(message_id)
return dict(
pm_dict=pm_dict,
stream_dict=stream_dict,
muted_stream_ids=muted_stream_ids,
unmuted_stream_msgs=unmuted_stream_msgs,
huddle_dict=huddle_dict,
mentions=mentions,
)
def aggregate_unread_data(raw_data: RawUnreadMessagesResult) -> UnreadMessagesResult:
pm_dict = raw_data['pm_dict']
stream_dict = raw_data['stream_dict']
unmuted_stream_msgs = raw_data['unmuted_stream_msgs']
huddle_dict = raw_data['huddle_dict']
mentions = list(raw_data['mentions'])
count = len(pm_dict) + len(unmuted_stream_msgs) + len(huddle_dict)
pm_objects = aggregate_message_dict(
input_dict=pm_dict,
lookup_fields=[
'sender_id',
],
collect_senders=False,
)
stream_objects = aggregate_message_dict(
input_dict=stream_dict,
lookup_fields=[
'stream_id',
'topic',
],
collect_senders=True,
)
huddle_objects = aggregate_message_dict(
input_dict=huddle_dict,
lookup_fields=[
'user_ids_string',
],
collect_senders=False,
)
result = dict(
pms=pm_objects,
streams=stream_objects,
huddles=huddle_objects,
mentions=mentions,
count=count) # type: UnreadMessagesResult
return result
def apply_unread_message_event(user_profile: UserProfile,
state: Dict[str, Any],
message: Dict[str, Any],
flags: List[str]) -> None:
message_id = message['id']
if message['type'] == 'stream':
message_type = 'stream'
elif message['type'] == 'private':
others = [
recip for recip in message['display_recipient']
if recip['id'] != message['sender_id']
]
if len(others) <= 1:
message_type = 'private'
else:
message_type = 'huddle'
else:
raise AssertionError("Invalid message type %s" % (message['type'],))
sender_id = message['sender_id']
if message_type == 'stream':
stream_id = message['stream_id']
topic = message[TOPIC_NAME]
new_row = dict(
stream_id=stream_id,
topic=topic,
sender_id=sender_id,
)
state['stream_dict'][message_id] = new_row
if stream_id not in state['muted_stream_ids']:
# This next check hits the database.
if not topic_is_muted(user_profile, stream_id, topic):
state['unmuted_stream_msgs'].add(message_id)
elif message_type == 'private':
sender_id = message['sender_id']
new_row = dict(
sender_id=sender_id,
)
state['pm_dict'][message_id] = new_row
else:
display_recipient = message['display_recipient']
user_ids = [obj['id'] for obj in display_recipient]
user_ids = sorted(user_ids)
user_ids_string = ','.join(str(uid) for uid in user_ids)
new_row = dict(
user_ids_string=user_ids_string,
)
state['huddle_dict'][message_id] = new_row
if 'mentioned' in flags:
state['mentions'].add(message_id)
def estimate_recent_messages(realm: Realm, hours: int) -> int:
stat = COUNT_STATS['messages_sent:is_bot:hour']
d = timezone_now() - datetime.timedelta(hours=hours)
return RealmCount.objects.filter(property=stat.property, end_time__gt=d,
realm=realm).aggregate(Sum('value'))['value__sum'] or 0
def get_first_visible_message_id(realm: Realm) -> int:
val = cache_get(realm_first_visible_message_id_cache_key(realm))
if val is not None:
return val[0]
return 0
def maybe_update_first_visible_message_id(realm: Realm, lookback_hours: int) -> None:
cache_empty = cache_get(realm_first_visible_message_id_cache_key(realm)) is None
recent_messages_count = estimate_recent_messages(realm, lookback_hours)
if realm.message_visibility_limit is not None and (recent_messages_count > 0 or cache_empty):
update_first_visible_message_id(realm)
def update_first_visible_message_id(realm: Realm) -> None:
try:
        # We have verified that the limit is not None before calling this function.
assert realm.message_visibility_limit is not None
first_visible_message_id = Message.objects.filter(sender__realm=realm).values('id').\
order_by('-id')[realm.message_visibility_limit - 1]["id"]
except IndexError:
first_visible_message_id = 0
cache_set(realm_first_visible_message_id_cache_key(realm), first_visible_message_id)
| [
"List[int]",
"Dict[int, List[str]]",
"Dict[int, Dict[str, str]]",
"bool",
"bool",
"bool",
"List[Dict[str, Any]]",
"List[Dict[str, Any]]",
"List[Dict[str, Any]]",
"List[Dict[str, Any]]",
"bytes",
"Dict[str, Any]",
"Message",
"Message",
"str",
"Message",
"List[Dict[str, Any]]",
"bool",
"bool",
"Dict[str, Any]",
"bool",
"bool",
"Message",
"Message",
"List[int]",
"Dict[str, Any]",
"Optional[Message]",
"int",
"Optional[datetime.datetime]",
"Optional[str]",
"str",
"str",
"datetime.datetime",
"Optional[str]",
"Optional[int]",
"int",
"int",
"str",
"int",
"int",
"int",
"List[Dict[str, Any]]",
"List[Dict[str, Any]]",
"List[Dict[str, Any]]",
"Dict[str, Any]",
"Dict[str, Any]",
"bool",
"Dict[str, Any]",
"UserProfile",
"int",
"UserProfile",
"Message",
"Optional[UserMessage]",
"UserProfile",
"Sequence[Message]",
"Message",
"str",
"Message",
"str",
"Realm",
"RealmAlertWords",
"Set[int]",
"bool",
"bool",
"int",
"Dict[int, Dict[str, Any]]",
"List[str]",
"bool",
"UserProfile",
"UserProfile",
"UserProfile",
"UserProfile",
"int",
"int",
"str",
"int",
"RawUnreadMessagesResult",
"UserProfile",
"Dict[str, Any]",
"Dict[str, Any]",
"List[str]",
"Realm",
"int",
"Realm",
"Realm",
"int",
"Realm"
] | [
2157,
2209,
2267,
2331,
2375,
2422,
3782,
3846,
4469,
4537,
5024,
5163,
5337,
5449,
5467,
5796,
6468,
6506,
6529,
6801,
6858,
6906,
7534,
7715,
8736,
9694,
10804,
10847,
10880,
10935,
10971,
11000,
11027,
11076,
11129,
11167,
11201,
11239,
11270,
11303,
11339,
11367,
11414,
14411,
15636,
17613,
17646,
18316,
18728,
18753,
19574,
19596,
19642,
21296,
21319,
21665,
21703,
22913,
22954,
22989,
23038,
23096,
23142,
23192,
24664,
25188,
25257,
25312,
27207,
27524,
27871,
28163,
29030,
29049,
29061,
29327,
31063,
32198,
32249,
32305,
32359,
34078,
34092,
34425,
34618,
34641,
35005
] | [
2166,
2229,
2292,
2335,
2379,
2426,
3802,
3866,
4489,
4557,
5029,
5177,
5344,
5456,
5470,
5803,
6488,
6510,
6533,
6815,
6862,
6910,
7541,
7722,
8745,
9708,
10821,
10850,
10907,
10948,
10974,
11003,
11044,
11089,
11142,
11170,
11204,
11242,
11273,
11306,
11342,
11387,
11434,
14431,
15650,
17627,
17650,
18330,
18739,
18756,
19585,
19603,
19663,
21307,
21336,
21672,
21706,
22920,
22957,
22994,
23053,
23104,
23146,
23196,
24667,
25213,
25266,
25316,
27218,
27535,
27882,
28174,
29033,
29052,
29064,
29330,
31086,
32209,
32263,
32319,
32368,
34083,
34095,
34430,
34623,
34644,
35010
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/migrate.py | from django.db.models.query import QuerySet
from psycopg2.extensions import cursor
from typing import Any, Callable, Dict, List, Tuple, TypeVar
import re
import time
CursorObj = TypeVar('CursorObj', bound=cursor)
def create_index_if_not_exist(index_name: str, table_name: str, column_string: str,
where_clause: str) -> str:
#
# FUTURE TODO: When we no longer need to support postgres 9.3 for Trusty,
# we can use "IF NOT EXISTS", which is part of postgres 9.5
# (and which already is supported on Xenial systems).
stmt = '''
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_class
where relname = '%s'
) THEN
CREATE INDEX
%s
ON %s (%s)
%s;
END IF;
END$$;
''' % (index_name, index_name, table_name, column_string, where_clause)
return stmt
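# Illustrative call (hypothetical names): building the DO-block for a
# partial index, e.g. to feed to RunSQL in a migration:
#
#     sql = create_index_if_not_exist(
#         index_name='zerver_usermessage_starred_message_id',
#         table_name='zerver_usermessage',
#         column_string='user_profile_id, message_id',
#         where_clause='WHERE (flags & 2) != 0',
#     )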
def do_batch_update(cursor: CursorObj,
table: str,
cols: List[str],
vals: List[str],
batch_size: int=10000,
sleep: float=0.1,
escape: bool=True) -> None: # nocoverage
stmt = '''
UPDATE %s
SET (%s) = (%s)
WHERE id >= %%s AND id < %%s
''' % (table, ', '.join(cols), ', '.join(['%s'] * len(cols)))
cursor.execute("SELECT MIN(id), MAX(id) FROM %s" % (table,))
(min_id, max_id) = cursor.fetchall()[0]
if min_id is None:
return
print("\n Range of rows to update: [%s, %s]" % (min_id, max_id))
while min_id <= max_id:
lower = min_id
upper = min_id + batch_size
print(' Updating range [%s,%s)' % (lower, upper))
params = list(vals) + [lower, upper]
if escape:
cursor.execute(stmt, params=params)
else:
cursor.execute(stmt % tuple(params))
min_id = upper
time.sleep(sleep)
        # Once we've finished, check if any new rows were inserted into the table
if min_id > max_id:
cursor.execute("SELECT MAX(id) FROM %s" % (table,))
max_id = cursor.fetchall()[0][0]
print(" Finishing...", end='')
| [
"str",
"str",
"str",
"str",
"CursorObj",
"str",
"List[str]",
"List[str]"
] | [
258,
275,
295,
344,
1047,
1085,
1116,
1153
] | [
261,
278,
298,
347,
1056,
1088,
1125,
1162
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/mobile_auth_otp.py | # Simple one-time-pad library, to be used for encrypting Zulip API
# keys when sending them to the mobile apps via new standard mobile
# authentication flow. This encryption is used to protect against
# credential-stealing attacks where a malicious app registers the
# zulip:// URL on a device, which might otherwise allow it to hijack a
# user's API key.
#
# The decryption logic here isn't actually used by the flow; we just
# have it here as part of testing the overall library.
import binascii
from zerver.models import UserProfile
def xor_hex_strings(bytes_a: str, bytes_b: str) -> str:
"""Given two hex strings of equal length, return a hex string with
the bitwise xor of the two hex strings."""
assert len(bytes_a) == len(bytes_b)
return ''.join(["%x" % (int(x, 16) ^ int(y, 16))
for x, y in zip(bytes_a, bytes_b)])
def ascii_to_hex(input_string: str) -> str:
"""Given an ascii string, encode it as a hex string"""
return "".join([hex(ord(c))[2:].zfill(2) for c in input_string])
def hex_to_ascii(input_string: str) -> str:
"""Given a hex array, decode it back to a string"""
return binascii.unhexlify(input_string).decode('utf8')
def otp_encrypt_api_key(api_key: str, otp: str) -> str:
assert len(otp) == UserProfile.API_KEY_LENGTH * 2
hex_encoded_api_key = ascii_to_hex(api_key)
assert len(hex_encoded_api_key) == UserProfile.API_KEY_LENGTH * 2
return xor_hex_strings(hex_encoded_api_key, otp)
def otp_decrypt_api_key(otp_encrypted_api_key: str, otp: str) -> str:
assert len(otp) == UserProfile.API_KEY_LENGTH * 2
assert len(otp_encrypted_api_key) == UserProfile.API_KEY_LENGTH * 2
hex_encoded_api_key = xor_hex_strings(otp_encrypted_api_key, otp)
return hex_to_ascii(hex_encoded_api_key)
def is_valid_otp(otp: str) -> bool:
try:
assert len(otp) == UserProfile.API_KEY_LENGTH * 2
[int(c, 16) for c in otp]
return True
except Exception:
return False
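# Round-trip sketch (demo only; assumes a configured Django environment, and
# the otp value below is fabricated -- any API_KEY_LENGTH * 2 hex digits work):
if __name__ == '__main__':  # pragma: no cover -- demo only
    api_key = 'abcdefghijklmnopqrstuvwxyz012345'  # 32 ascii characters
    otp = '1a' * UserProfile.API_KEY_LENGTH       # 64 hex digits
    encrypted = otp_encrypt_api_key(api_key, otp)
    assert is_valid_otp(otp)
    assert otp_decrypt_api_key(encrypted, otp) == api_key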
| [
"str",
"str",
"str",
"str",
"str",
"str",
"str",
"str",
"str"
] | [
568,
582,
894,
1067,
1229,
1239,
1525,
1535,
1812
] | [
571,
585,
897,
1070,
1232,
1242,
1528,
1538,
1815
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/name_restrictions.py | from disposable_email_domains import blacklist
def is_reserved_subdomain(subdomain: str) -> bool:
if subdomain in ZULIP_RESERVED_SUBDOMAINS:
return True
if subdomain[-1] == 's' and subdomain[:-1] in ZULIP_RESERVED_SUBDOMAINS:
return True
if subdomain in GENERIC_RESERVED_SUBDOMAINS:
return True
if subdomain[-1] == 's' and subdomain[:-1] in GENERIC_RESERVED_SUBDOMAINS:
return True
return False
def is_disposable_domain(domain: str) -> bool:
if domain.lower() in WHITELISTED_EMAIL_DOMAINS:
return False
return domain.lower() in DISPOSABLE_DOMAINS
ZULIP_RESERVED_SUBDOMAINS = frozenset([
# zulip terms
'stream', 'channel', 'topic', 'thread', 'installation', 'organization', 'realm',
'team', 'subdomain', 'activity', 'octopus', 'acme', 'push',
# machines
'zulipdev', 'localhost', 'staging', 'prod', 'production', 'testing', 'nagios', 'nginx',
# website pages
'server', 'client', 'features', 'integration', 'bot', 'blog', 'history', 'story',
'stories', 'testimonial', 'compare', 'for', 'vs',
# competitor pages
'slack', 'mattermost', 'rocketchat', 'irc', 'twitter', 'zephyr', 'flowdock', 'spark',
'skype', 'microsoft', 'twist', 'ryver', 'matrix', 'discord', 'email', 'usenet',
# zulip names
'zulip', 'tulip', 'humbug',
# platforms
'plan9', 'electron', 'linux', 'mac', 'windows', 'cli', 'ubuntu', 'android', 'ios',
# floss
'contribute', 'floss', 'foss', 'free', 'opensource', 'open', 'code', 'license',
    # internship programs
'intern', 'outreachy', 'gsoc', 'gci', 'externship',
# Things that sound like security
'auth', 'authentication', 'security',
# tech blogs
'engineering', 'infrastructure', 'tooling', 'tools', 'javascript', 'python'])
# Most of this list was curated from the following sources:
# http://wiki.dwscoalition.org/notes/List_of_reserved_subdomains (license: CC-BY-SA 3.0)
# http://stackoverflow.com/questions/11868191/which-saas-subdomains-to-block (license: CC-BY-SA 2.5)
GENERIC_RESERVED_SUBDOMAINS = frozenset([
'about', 'abuse', 'account', 'ad', 'admanager', 'admin', 'admindashboard',
'administrator', 'adsense', 'adword', 'affiliate', 'alpha', 'anonymous',
'api', 'assets', 'audio', 'badges', 'beta', 'billing', 'biz', 'blog',
'board', 'bookmark', 'bot', 'bugs', 'buy', 'cache', 'calendar', 'chat',
'clients', 'cname', 'code', 'comment', 'communities', 'community',
'contact', 'contributor', 'control', 'coppa', 'copyright', 'cpanel', 'css',
'cssproxy', 'customise', 'customize', 'dashboard', 'data', 'demo', 'deploy',
'deployment', 'desktop', 'dev', 'devel', 'developer', 'development',
'discussion', 'diversity', 'dmca', 'docs', 'donate', 'download', 'e-mail',
'email', 'embed', 'embedded', 'example', 'explore', 'faq', 'favorite',
'favourites', 'features', 'feed', 'feedback', 'files', 'forum', 'friend',
'ftp', 'general', 'gettingstarted', 'gift', 'git', 'global', 'graphs',
'guide', 'hack', 'help', 'home', 'hostmaster', 'https', 'icon', 'im',
'image', 'img', 'inbox', 'index', 'investors', 'invite', 'invoice', 'ios',
'ipad', 'iphone', 'irc', 'jabber', 'jars', 'jobs', 'join', 'js', 'kb',
'knowledgebase', 'launchpad', 'legal', 'livejournal', 'lj', 'login', 'logs',
'm', 'mail', 'main', 'manage', 'map', 'media', 'memories', 'memory',
'merchandise', 'messages', 'mobile', 'my', 'mystore', 'networks', 'new',
'newsite', 'official', 'ogg', 'online', 'order', 'paid', 'panel', 'partner',
'partnerpage', 'pay', 'payment', 'picture', 'policy', 'pop', 'popular',
'portal', 'post', 'postmaster', 'press', 'pricing', 'principles', 'privacy',
'private', 'profile', 'public', 'random', 'redirect', 'register',
'registration', 'resolver', 'root', 'rss', 's', 'sandbox', 'school',
'search', 'secure', 'servers', 'service', 'setting', 'shop', 'shortcuts',
'signin', 'signup', 'sitemap', 'sitenews', 'sites', 'sms', 'smtp', 'sorry',
'ssl', 'staff', 'stage', 'staging', 'stars', 'stat', 'static', 'statistics',
'status', 'store', 'style', 'support', 'surveys', 'svn', 'syn',
'syndicated', 'system', 'tag', 'talk', 'team', 'termsofservice', 'test',
'testers', 'ticket', 'tool', 'tos', 'trac', 'translate', 'update',
'upgrade', 'uploads', 'use', 'user', 'username', 'validation', 'videos',
'volunteer', 'web', 'webdisk', 'webmail', 'webmaster', 'whm', 'whois',
'wiki', 'www', 'www0', 'www8', 'www9', 'xml', 'xmpp', 'xoxo'])
DISPOSABLE_DOMAINS = frozenset(blacklist)
WHITELISTED_EMAIL_DOMAINS = frozenset([
# Controlled by https://www.abine.com; more legitimate than most
# disposable domains
'opayq.com', 'abinemail.com', 'blurmail.net', 'maskmemail.com',
])
| [
"str",
"str"
] | [
85,
482
] | [
88,
485
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/narrow.py | from zerver.lib.request import JsonableError
from zerver.lib.topic import (
get_topic_from_message_info,
)
from django.utils.translation import ugettext as _
from typing import Any, Callable, Dict, Iterable, Mapping, Sequence
def check_supported_events_narrow_filter(narrow: Iterable[Sequence[str]]) -> None:
for element in narrow:
operator = element[0]
if operator not in ["stream", "topic", "sender", "is"]:
raise JsonableError(_("Operator %s not supported.") % (operator,))
def is_web_public_compatible(narrow: Iterable[Dict[str, str]]) -> bool:
for element in narrow:
operator = element['operator']
if 'operand' not in element:
return False
if operator not in ["stream", "topic", "sender", "has", "search", "near", "id"]:
return False
return True
def build_narrow_filter(narrow: Iterable[Sequence[str]]) -> Callable[[Mapping[str, Any]], bool]:
"""Changes to this function should come with corresponding changes to
BuildNarrowFilterTest."""
check_supported_events_narrow_filter(narrow)
def narrow_filter(event: Mapping[str, Any]) -> bool:
message = event["message"]
flags = event["flags"]
for element in narrow:
operator = element[0]
operand = element[1]
if operator == "stream":
if message["type"] != "stream":
return False
if operand.lower() != message["display_recipient"].lower():
return False
elif operator == "topic":
if message["type"] != "stream":
return False
topic_name = get_topic_from_message_info(message)
if operand.lower() != topic_name.lower():
return False
elif operator == "sender":
if operand.lower() != message["sender_email"].lower():
return False
elif operator == "is" and operand == "private":
if message["type"] != "private":
return False
elif operator == "is" and operand in ["starred"]:
if operand not in flags:
return False
elif operator == "is" and operand == "unread":
if "read" in flags:
return False
elif operator == "is" and operand in ["alerted", "mentioned"]:
if "mentioned" not in flags:
return False
return True
return narrow_filter
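# Usage sketch (fabricated event payload; field values are illustrative):
#
#     narrow_filter = build_narrow_filter([['stream', 'Verona'],
#                                          ['sender', 'iago@zulip.com']])
#     event = {'message': {'type': 'stream',
#                          'display_recipient': 'Verona',
#                          'sender_email': 'iago@zulip.com'},
#              'flags': []}
#     narrow_filter(event)  # -> True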
| [
"Iterable[Sequence[str]]",
"Iterable[Dict[str, str]]",
"Iterable[Sequence[str]]",
"Mapping[str, Any]"
] | [
282,
554,
880,
1128
] | [
305,
578,
903,
1145
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/notifications.py |
from typing import cast, Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple
from confirmation.models import Confirmation, one_click_unsubscribe_link
from django.conf import settings
from django.template import loader
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.contrib.auth import get_backends
from django_auth_ldap.backend import LDAPBackend
from zerver.decorator import statsd_increment
from zerver.lib.message import bulk_access_messages
from zerver.lib.queue import queue_json_publish
from zerver.lib.send_email import send_future_email, FromAddress
from zerver.lib.url_encoding import personal_narrow_url, huddle_narrow_url, \
stream_narrow_url, topic_narrow_url
from zerver.models import (
Recipient,
ScheduledEmail,
UserMessage,
Stream,
get_display_recipient,
UserProfile,
get_user,
get_user_profile_by_id,
receives_offline_email_notifications,
get_context_for_message,
Message,
Realm,
)
from datetime import timedelta, datetime
from email.utils import formataddr
from lxml.cssselect import CSSSelector
import lxml.html
import re
import subprocess
import ujson
from collections import defaultdict
import pytz
def relative_to_full_url(base_url: str, content: str) -> str:
# Convert relative URLs to absolute URLs.
fragment = lxml.html.fromstring(content)
# We handle narrow URLs separately because of two reasons:
# 1: 'lxml' seems to be having an issue in dealing with URLs that begin
# `#` due to which it doesn't add a `/` before joining the base_url to
# the relative URL.
# 2: We also need to update the title attribute in the narrow links which
# is not possible with `make_links_absolute()`.
for link_info in fragment.iterlinks():
elem, attrib, link, pos = link_info
match = re.match("/?#narrow/", link)
if match is not None:
link = re.sub(r"^/?#narrow/", base_url + "/#narrow/", link)
elem.set(attrib, link)
# Only manually linked narrow URLs have title attribute set.
if elem.get('title') is not None:
elem.set('title', link)
# Inline images can't be displayed in the emails as the request
# from the mail server can't be authenticated because it has no
# user_profile object linked to it. So we scrub the inline image
# container.
inline_image_containers = fragment.find_class("message_inline_image")
for container in inline_image_containers:
container.drop_tree()
# The previous block handles most inline images, but for messages
# where the entire markdown input was just the URL of an image
# (i.e. the entire body is a message_inline_image object), the
# entire message body will be that image element; here, we need a
# more drastic edit to the content.
if fragment.get('class') == 'message_inline_image':
content_template = '<p><a href="%s" target="_blank" title="%s">%s</a></p>'
image_link = fragment.find('a').get('href')
image_title = fragment.find('a').get('title')
new_content = (content_template % (image_link, image_title, image_link))
fragment = lxml.html.fromstring(new_content)
fragment.make_links_absolute(base_url)
content = lxml.html.tostring(fragment).decode("utf-8")
return content
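# Hedged example (not part of the original module): a manually linked narrow
# URL gets both its href and title rewritten to absolute form. The narrow
# URL used here is illustrative.
def _demo_relative_to_full_url() -> None:
    html = ('<p><a href="#narrow/stream/1-general" '
            'title="#narrow/stream/1-general">#general</a></p>')
    result = relative_to_full_url("https://chat.example.com", html)
    assert 'href="https://chat.example.com/#narrow/stream/1-general"' in result
    assert 'title="https://chat.example.com/#narrow/stream/1-general"' in result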
def fix_emojis(content: str, base_url: str, emojiset: str) -> str:
def make_emoji_img_elem(emoji_span_elem: CSSSelector) -> Dict[str, Any]:
# Convert the emoji spans to img tags.
classes = emoji_span_elem.get('class')
match = re.search(r'emoji-(?P<emoji_code>\S+)', classes)
        # re.search can return None, but since this helper should only be
        # called with a valid emoji span element, we assert that it doesn't.
assert match is not None
emoji_code = match.group('emoji_code')
emoji_name = emoji_span_elem.get('title')
alt_code = emoji_span_elem.text
image_url = base_url + '/static/generated/emoji/images-%(emojiset)s-64/%(emoji_code)s.png' % {
'emojiset': emojiset,
'emoji_code': emoji_code
}
img_elem = lxml.html.fromstring(
'<img alt="%(alt_code)s" src="%(image_url)s" title="%(title)s">' % {
'alt_code': alt_code,
'image_url': image_url,
'title': emoji_name,
})
img_elem.set('style', 'height: 20px;')
img_elem.tail = emoji_span_elem.tail
return img_elem
fragment = lxml.html.fromstring(content)
for elem in fragment.cssselect('span.emoji'):
parent = elem.getparent()
img_elem = make_emoji_img_elem(elem)
parent.replace(elem, img_elem)
for realm_emoji in fragment.cssselect('.emoji'):
del realm_emoji.attrib['class']
realm_emoji.set('style', 'height: 20px;')
content = lxml.html.tostring(fragment).decode('utf-8')
return content
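# Hedged example (not part of the original module): an emoji span, written
# with the class/title markup this function expects, becomes an inline <img>
# pointing at the PNG for the given emojiset. The exact span markup is an
# assumption here.
def _demo_fix_emojis() -> None:
    html = '<p><span class="emoji emoji-1f642" title="smile">:smile:</span></p>'
    result = fix_emojis(html, "https://chat.example.com", "google")
    assert '<img' in result
    assert 'images-google-64/1f642.png' in result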
def build_message_list(user_profile: UserProfile, messages: List[Message]) -> List[Dict[str, Any]]:
"""
Builds the message list object for the missed message email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface does.
"""
messages_to_render = [] # type: List[Dict[str, Any]]
def sender_string(message: Message) -> str:
if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
return message.sender.full_name
else:
return ''
def fix_plaintext_image_urls(content: str) -> str:
# Replace image URLs in plaintext content of the form
# [image name](image url)
# with a simple hyperlink.
return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)
def build_message_payload(message: Message) -> Dict[str, str]:
plain = message.content
plain = fix_plaintext_image_urls(plain)
# There's a small chance of colliding with non-Zulip URLs containing
# "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage. We can't use the
        # `relative_to_full_url()` function here because it uses a stricter
        # regex that will not work for plain text.
plain = re.sub(
r"/user_uploads/(\S*)",
user_profile.realm.uri + r"/user_uploads/\1", plain)
assert message.rendered_content is not None
html = message.rendered_content
html = relative_to_full_url(user_profile.realm.uri, html)
html = fix_emojis(html, user_profile.realm.uri, user_profile.emojiset)
return {'plain': plain, 'html': html}
def build_sender_payload(message: Message) -> Dict[str, Any]:
sender = sender_string(message)
return {'sender': sender,
'content': [build_message_payload(message)]}
def message_header(user_profile: UserProfile, message: Message) -> Dict[str, Any]:
if message.recipient.type == Recipient.PERSONAL:
header = "You and %s" % (message.sender.full_name,)
html_link = personal_narrow_url(
realm=user_profile.realm,
sender=message.sender,
)
header_html = "<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
elif message.recipient.type == Recipient.HUDDLE:
disp_recipient = get_display_recipient(message.recipient)
assert not isinstance(disp_recipient, str)
other_recipients = [r['full_name'] for r in disp_recipient
if r['id'] != user_profile.id]
header = "You and %s" % (", ".join(other_recipients),)
other_user_ids = [r['id'] for r in disp_recipient
if r['id'] != user_profile.id]
html_link = huddle_narrow_url(
realm=user_profile.realm,
other_user_ids=other_user_ids,
)
header_html = "<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
else:
stream = Stream.objects.only('id', 'name').get(id=message.recipient.type_id)
header = "%s > %s" % (stream.name, message.topic_name())
stream_link = stream_narrow_url(user_profile.realm, stream)
topic_link = topic_narrow_url(user_profile.realm, stream, message.topic_name())
header_html = "<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
stream_link, stream.name, topic_link, message.topic_name())
return {"plain": header,
"html": header_html,
"stream_message": message.recipient.type_name() == "stream"}
    # Collapse message list to:
# [
# {
# "header": {
# "plain":"header",
# "html":"htmlheader"
# }
# "senders":[
# {
# "sender":"sender_name",
# "content":[
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# ]
# }
# ]
# },
# ]
messages.sort(key=lambda message: message.pub_date)
for message in messages:
header = message_header(user_profile, message)
# If we want to collapse into the previous recipient block
if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
sender = sender_string(message)
sender_block = messages_to_render[-1]['senders']
# Same message sender, collapse again
if sender_block[-1]['sender'] == sender:
sender_block[-1]['content'].append(build_message_payload(message))
else:
# Start a new sender block
sender_block.append(build_sender_payload(message))
else:
# New recipient and sender block
recipient_block = {'header': header,
'senders': [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile: UserProfile,
missed_messages: List[Dict[str, Any]],
message_count: int) -> None:
"""
Send a reminder email to a user if she's missed some PMs by being offline.
    The email will have its reply-to address set to a limited-use email
    address that will send a Zulip message to the correct recipient. This
allows the user to respond to missed PMs, huddles, and @-mentions directly
from the email.
`user_profile` is the user to send the reminder to
    `missed_messages` is a list of dictionaries, each containing a Message
    object and other data, for a group of messages that share a recipient
    (and topic)
"""
from zerver.context_processors import common_context
    # If the user has disabled missed-message emails, do nothing.
if not user_profile.enable_offline_email_notifications:
return
recipients = set((msg['message'].recipient_id, msg['message'].topic_name()) for msg in missed_messages)
if len(recipients) != 1:
raise ValueError(
'All missed_messages must have the same recipient and topic %r' %
recipients
)
unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
context = common_context(user_profile)
context.update({
'name': user_profile.full_name,
'message_count': message_count,
'unsubscribe_link': unsubscribe_link,
'realm_name_in_notifications': user_profile.realm_name_in_notifications,
'show_message_content': user_profile.message_content_in_email_notifications,
})
triggers = list(message['trigger'] for message in missed_messages)
unique_triggers = set(triggers)
context.update({
'mention': 'mentioned' in unique_triggers,
'mention_count': triggers.count('mentioned'),
})
    # Users can only reply to the email to send a message to Zulip if the
    # email mirroring integration is enabled. If it isn't, the template
    # must display a warning instead.
if settings.EMAIL_GATEWAY_PATTERN:
context.update({
'reply_warning': False,
'reply_to_zulip': True,
})
else:
context.update({
'reply_warning': True,
'reply_to_zulip': False,
})
from zerver.lib.email_mirror import create_missed_message_address
reply_to_address = create_missed_message_address(user_profile, missed_messages[0]['message'])
if reply_to_address == FromAddress.NOREPLY:
reply_to_name = None
else:
reply_to_name = "Zulip"
senders = list(set(m['message'].sender for m in missed_messages))
if (missed_messages[0]['message'].recipient.type == Recipient.HUDDLE):
display_recipient = get_display_recipient(missed_messages[0]['message'].recipient)
# Make sure that this is a list of strings, not a string.
assert not isinstance(display_recipient, str)
other_recipients = [r['full_name'] for r in display_recipient
if r['id'] != user_profile.id]
context.update({'group_pm': True})
if len(other_recipients) == 2:
huddle_display_name = "%s" % (" and ".join(other_recipients))
context.update({'huddle_display_name': huddle_display_name})
elif len(other_recipients) == 3:
huddle_display_name = "%s, %s, and %s" % (
other_recipients[0], other_recipients[1], other_recipients[2])
context.update({'huddle_display_name': huddle_display_name})
else:
huddle_display_name = "%s, and %s others" % (
', '.join(other_recipients[:2]), len(other_recipients) - 2)
context.update({'huddle_display_name': huddle_display_name})
elif (missed_messages[0]['message'].recipient.type == Recipient.PERSONAL):
context.update({'private_message': True})
elif context['mention']:
# Keep only the senders who actually mentioned the user
senders = list(set(m['message'].sender for m in missed_messages
if m['trigger'] == 'mentioned'))
# TODO: When we add wildcard mentions that send emails, we
# should make sure the right logic applies here.
elif ('stream_email_notify' in unique_triggers):
context.update({'stream_email_notify': True})
else:
raise AssertionError("Invalid messages!")
# If message content is disabled, then flush all information we pass to email.
if not user_profile.message_content_in_email_notifications:
context.update({
'reply_to_zulip': False,
'messages': [],
'sender_str': "",
'realm_str': user_profile.realm.name,
'huddle_display_name': "",
})
else:
context.update({
'messages': build_message_list(user_profile, list(m['message'] for m in missed_messages)),
'sender_str': ", ".join(sender.full_name for sender in senders),
'realm_str': user_profile.realm.name,
})
from_name = "Zulip missed messages" # type: str
from_address = FromAddress.NOREPLY
if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
# If this setting is enabled, you can reply to the Zulip
# missed message emails directly back to the original sender.
# However, one must ensure the Zulip server is in the SPF
# record for the domain, or there will be spam/deliverability
# problems.
sender = senders[0]
from_name, from_address = (sender.full_name, sender.email)
context.update({
'reply_warning': False,
'reply_to_zulip': False,
})
email_dict = {
'template_prefix': 'zerver/emails/missed_message',
'to_user_id': user_profile.id,
'from_name': from_name,
'from_address': from_address,
'reply_to_email': formataddr((reply_to_name, reply_to_address)),
'context': context}
queue_json_publish("email_senders", email_dict)
user_profile.last_reminder = timezone_now()
user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id: int,
missed_email_events: Iterable[Dict[str, Any]]) -> None:
message_ids = {event.get('message_id'): event.get('trigger') for event in missed_email_events}
user_profile = get_user_profile_by_id(user_profile_id)
if not receives_offline_email_notifications(user_profile):
return
messages = Message.objects.filter(usermessage__user_profile_id=user_profile,
id__in=message_ids,
usermessage__flags=~UserMessage.flags.read)
# Cancel missed-message emails for deleted messages
messages = [um for um in messages if um.content != "(deleted)"]
if not messages:
return
# We bucket messages by tuples that identify similar messages.
# For streams it's recipient_id and topic.
# For PMs it's recipient id and sender.
messages_by_bucket = defaultdict(list) # type: Dict[Tuple[int, str], List[Message]]
for msg in messages:
if msg.recipient.type == Recipient.PERSONAL:
            # For PMs, group by (recipient, sender).
messages_by_bucket[(msg.recipient_id, msg.sender_id)].append(msg)
else:
messages_by_bucket[(msg.recipient_id, msg.topic_name())].append(msg)
message_count_by_bucket = {
bucket_tup: len(msgs)
for bucket_tup, msgs in messages_by_bucket.items()
}
for msg_list in messages_by_bucket.values():
msg = min(msg_list, key=lambda msg: msg.pub_date)
if msg.is_stream_message():
context_messages = get_context_for_message(msg)
filtered_context_messages = bulk_access_messages(user_profile, context_messages)
msg_list.extend(filtered_context_messages)
# Sort emails by least recently-active discussion.
bucket_tups = [] # type: List[Tuple[Tuple[int, str], int]]
for bucket_tup, msg_list in messages_by_bucket.items():
max_message_id = max(msg_list, key=lambda msg: msg.id).id
bucket_tups.append((bucket_tup, max_message_id))
bucket_tups = sorted(bucket_tups, key=lambda x: x[1])
# Send an email per bucket.
for bucket_tup, ignored_max_id in bucket_tups:
unique_messages = {}
for m in messages_by_bucket[bucket_tup]:
unique_messages[m.id] = dict(
message=m,
trigger=message_ids.get(m.id)
)
do_send_missedmessage_events_reply_in_zulip(
user_profile,
list(unique_messages.values()),
message_count_by_bucket[bucket_tup],
)
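# Worked example of the bucketing above (ids are illustrative): three unread
# messages, two in stream recipient 7 with topic "deploys" and one PM from
# sender 42 to recipient 3, produce two buckets and thus two emails:
#
#     messages_by_bucket = {
#         (7, "deploys"): [msg1, msg2],  # stream: (recipient_id, topic)
#         (3, 42): [msg3],               # PM: (recipient_id, sender_id)
#     }
#
# Buckets are then emailed in ascending order of their highest message id,
# i.e. the least recently active discussion first.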
def clear_scheduled_invitation_emails(email: str) -> None:
"""Unlike most scheduled emails, invitation emails don't have an
existing user object to key off of, so we filter by address here."""
items = ScheduledEmail.objects.filter(address__iexact=email,
type=ScheduledEmail.INVITATION_REMINDER)
items.delete()
def clear_scheduled_emails(user_id: int, email_type: Optional[int]=None) -> None:
items = ScheduledEmail.objects.filter(user_id=user_id)
if email_type is not None:
items = items.filter(type=email_type)
items.delete()
def log_digest_event(msg: str) -> None:
import logging
import time
logging.Formatter.converter = time.gmtime
logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
logging.info(msg)
def followup_day2_email_delay(user: UserProfile) -> timedelta:
days_to_delay = 2
user_tz = user.timezone
if user_tz == '':
user_tz = 'UTC'
signup_day = user.date_joined.astimezone(pytz.timezone(user_tz)).isoweekday()
if signup_day == 5:
        # If the signup day is Friday, the delay should run until Monday.
days_to_delay = 3
elif signup_day == 4:
        # If the signup day is Thursday, the delay should run until Friday.
days_to_delay = 1
    # Subtract one hour from the delay calculated above: our goal is to
    # maximize the chance that this email is near the top of the user's
    # inbox when the user sits down to deal with their inbox, or arrives
    # while they are dealing with it.
return timedelta(days=days_to_delay, hours=-1)
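# Hedged worked example (not part of the original module): a user who joined
# on a Thursday gets days_to_delay == 1, so the followup is scheduled 23
# hours out. The SimpleNamespace stand-in duck-types only the two attributes
# this function reads; mypy would flag it, and it is for illustration only.
def _demo_followup_day2_email_delay() -> None:
    from types import SimpleNamespace
    user = SimpleNamespace(timezone='UTC',
                           date_joined=datetime(2018, 11, 1, 12, 0, tzinfo=pytz.utc))
    assert followup_day2_email_delay(user) == timedelta(days=1, hours=-1)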
def enqueue_welcome_emails(user: UserProfile, realm_creation: bool=False) -> None:
from zerver.context_processors import common_context
if settings.WELCOME_EMAIL_SENDER is not None:
# line break to avoid triggering lint rule
from_name = settings.WELCOME_EMAIL_SENDER['name']
from_address = settings.WELCOME_EMAIL_SENDER['email']
else:
from_name = None
from_address = FromAddress.SUPPORT
other_account_count = UserProfile.objects.filter(
email__iexact=user.email).exclude(id=user.id).count()
unsubscribe_link = one_click_unsubscribe_link(user, "welcome")
context = common_context(user)
context.update({
'unsubscribe_link': unsubscribe_link,
'keyboard_shortcuts_link': user.realm.uri + '/help/keyboard-shortcuts',
'realm_name': user.realm.name,
'realm_creation': realm_creation,
'email': user.email,
'is_realm_admin': user.is_realm_admin,
})
if user.is_realm_admin:
context['getting_started_link'] = (user.realm.uri +
'/help/getting-your-organization-started-with-zulip')
else:
context['getting_started_link'] = "https://zulipchat.com"
from zproject.backends import email_belongs_to_ldap, require_email_format_usernames
if email_belongs_to_ldap(user.realm, user.email) and not require_email_format_usernames(user.realm):
context["ldap_username"] = True
send_future_email(
"zerver/emails/followup_day1", user.realm, to_user_id=user.id, from_name=from_name,
from_address=from_address, context=context)
if other_account_count == 0:
send_future_email(
"zerver/emails/followup_day2", user.realm, to_user_id=user.id, from_name=from_name,
from_address=from_address, context=context, delay=followup_day2_email_delay(user))
def convert_html_to_markdown(html: str) -> str:
# On Linux, the tool installs as html2markdown, and there's a command called
# html2text that does something totally different. On OSX, the tool installs
# as html2text.
commands = ["html2markdown", "html2text"]
for command in commands:
try:
# A body width of 0 means do not try to wrap the text for us.
p = subprocess.Popen(
[command, "--body-width=0"], stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
break
        except OSError:
            continue
    else:
        # None of the converter commands are installed; raise a clear error
        # instead of hitting an UnboundLocalError on `p` below.
        raise OSError("Could not find html2markdown or html2text")
markdown = p.communicate(input=html.encode('utf-8'))[0].decode('utf-8').strip()
# We want images to get linked and inline previewed, but html2text will turn
# them into links of the form `![](http://foo.com/image.png)`, which is
# ugly. Run a regex over the resulting description, turning links of the
# form `![](http://foo.com/image.png?12345)` into
# `[image.png](http://foo.com/image.png)`.
return re.sub("!\\[\\]\\((\\S*)/(\\S*)\\?(\\S*)\\)",
"[\\2](\\1/\\2)", markdown)
| [
"str",
"str",
"str",
"str",
"str",
"CSSSelector",
"UserProfile",
"List[Message]",
"Message",
"str",
"Message",
"Message",
"UserProfile",
"Message",
"UserProfile",
"List[Dict[str, Any]]",
"int",
"int",
"Iterable[Dict[str, Any]]",
"str",
"int",
"str",
"UserProfile",
"UserProfile",
"str"
] | [
1292,
1306,
3422,
3437,
3452,
3510,
5073,
5096,
5416,
5631,
5880,
6772,
6973,
6995,
10493,
10571,
10656,
16752,
16810,
19382,
19742,
19970,
20203,
21004,
22891
] | [
1295,
1309,
3425,
3440,
3455,
3521,
5084,
5109,
5423,
5634,
5887,
6779,
6984,
7002,
10504,
10591,
10659,
16755,
16834,
19385,
19745,
19973,
20214,
21015,
22894
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/onboarding.py |
from django.conf import settings
from zerver.lib.actions import set_default_streams, bulk_add_subscriptions, \
internal_prep_stream_message, internal_send_private_message, \
create_stream_if_needed, create_streams_if_needed, do_send_messages, \
do_add_reaction_legacy, create_users, missing_any_realm_internal_bots
from zerver.lib.topic import get_turtle_message
from zerver.models import Realm, UserProfile, Message, Reaction, get_system_bot
from typing import Any, Dict, List, Mapping
def setup_realm_internal_bots(realm: Realm) -> None:
"""Create this realm's internal bots.
This function is idempotent; it does nothing for a bot that
already exists.
"""
internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.REALM_INTERNAL_BOTS]
create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
bots = UserProfile.objects.filter(
realm=realm,
email__in=[bot_info[1] for bot_info in internal_bots],
bot_owner__isnull=True
)
for bot in bots:
bot.bot_owner = bot
bot.save()
def create_if_missing_realm_internal_bots() -> None:
"""This checks if there is any realm internal bot missing.
If that is the case, it creates the missing realm internal bots.
"""
if missing_any_realm_internal_bots():
for realm in Realm.objects.all():
setup_realm_internal_bots(realm)
def send_initial_pms(user: UserProfile) -> None:
organization_setup_text = ""
if user.is_realm_admin:
help_url = user.realm.uri + "/help/getting-your-organization-started-with-zulip"
organization_setup_text = ("* [Read the guide](%s) for getting your organization "
"started with Zulip\n" % (help_url,))
content = (
"Hello, and welcome to Zulip!\n\nThis is a private message from me, Welcome Bot. "
"Here are some tips to get you started:\n"
"* Download our [Desktop and mobile apps](/apps)\n"
"* Customize your account and notifications on your [Settings page](#settings)\n"
"* Type `?` to check out Zulip's keyboard shortcuts\n"
"%s"
"\n"
"The most important shortcut is `r` to reply.\n\n"
"Practice sending a few messages by replying to this conversation. If you're not into "
"keyboards, that's okay too; clicking anywhere on this message will also do the trick!") \
% (organization_setup_text,)
internal_send_private_message(user.realm, get_system_bot(settings.WELCOME_BOT),
user, content)
def setup_initial_streams(realm: Realm) -> None:
stream_dicts = [
{'name': "general"},
{'name': "new members",
'description': "For welcoming and onboarding new members. If you haven't yet, "
"introduce yourself in a new thread using your name as the topic!"},
{'name': "zulip",
'description': "For discussing Zulip, Zulip tips and tricks, and asking "
"questions about how Zulip works"}] # type: List[Mapping[str, Any]]
create_streams_if_needed(realm, stream_dicts)
set_default_streams(realm, {stream['name']: {} for stream in stream_dicts})
def send_initial_realm_messages(realm: Realm) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
    # Make sure each stream created in the realm creation process has at
    # least one message below. The order corresponds to the ordering of the
    # streams in the left sidebar, to make the initial Home view slightly
    # less overwhelming.
welcome_messages = [
{'stream': Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
'topic': "welcome",
'content': "This is a message on stream `%s` with the topic `welcome`. We'll use this stream "
"for system-generated notifications." % (Realm.DEFAULT_NOTIFICATION_STREAM_NAME,)},
{'stream': Realm.INITIAL_PRIVATE_STREAM_NAME,
'topic': "private streams",
'content': "This is a private stream. Only admins and people you invite "
"to the stream will be able to see that this stream exists."},
{'stream': "general",
'topic': "welcome",
'content': "Welcome to #**general**."},
{'stream': "new members",
'topic': "onboarding",
'content': "A #**new members** stream is great for onboarding new members.\n\nIf you're "
"reading this and aren't the first person here, introduce yourself in a new thread "
"using your name as the topic! Type `c` or click on `New Topic` at the bottom of the "
"screen to start a new topic."},
{'stream': "zulip",
'topic': "topic demonstration",
'content': "Here is a message in one topic. Replies to this message will go to this topic."},
{'stream': "zulip",
'topic': "topic demonstration",
'content': "A second message in this topic. With [turtles](/static/images/cute/turtle.png)!"},
{'stream': "zulip",
'topic': "second topic",
'content': "This is a message in a second topic.\n\nTopics are similar to email subjects, "
"in that each conversation should get its own topic. Keep them short, though; one "
"or two words will do it!"},
] # type: List[Dict[str, str]]
messages = [internal_prep_stream_message(
realm, welcome_bot,
message['stream'], message['topic'], message['content']) for message in welcome_messages]
message_ids = do_send_messages(messages)
    # We find the one of our just-sent messages with turtle.png in it,
    # and react to it. This is a bit hacky, but it works and is a
    # one-off thing.
turtle_message = get_turtle_message(message_ids=message_ids)
do_add_reaction_legacy(welcome_bot, turtle_message, 'turtle')
| [
"Realm",
"UserProfile",
"Realm",
"Realm"
] | [
540,
1498,
2691,
3313
] | [
545,
1509,
2696,
3318
] |
archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip | zerver/lib/openapi.py | # Set of helper functions to manipulate the OpenAPI files that define our REST
# API's specification.
import os
from typing import Any, Dict, List, Optional
OPENAPI_SPEC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'../openapi/zulip.yaml'))
# A list of exceptions we allow when running validate_against_openapi_schema.
# The validator will ignore these keys when they appear in the "content"
# passed.
EXCLUDE_PROPERTIES = {
'/register': {
'post': {
'200': ['max_message_id', 'realm_emoji']
}
}
}
class OpenAPISpec():
def __init__(self, path: str) -> None:
self.path = path
self.last_update = None # type: Optional[float]
self.data = None
def reload(self) -> None:
# Because importing yamole (and in turn, yaml) takes
# significant time, and we only use python-yaml for our API
# docs, importing it lazily here is a significant optimization
# to `manage.py` startup.
#
# There is a bit of a race here...we may have two processes
# accessing this module level object and both trying to
# populate self.data at the same time. Hopefully this will
# only cause some extra processing at startup and not data
# corruption.
from yamole import YamoleParser
with open(self.path) as f:
yaml_parser = YamoleParser(f)
self.data = yaml_parser.data
self.last_update = os.path.getmtime(self.path)
def spec(self) -> Dict[str, Any]:
"""Reload the OpenAPI file if it has been modified after the last time
it was read, and then return the parsed data.
"""
last_modified = os.path.getmtime(self.path)
        # We use != rather than < to cover the corner case of a user replacing
        # the file with an earlier version (older mtime) than the current one.
if self.last_update != last_modified:
self.reload()
assert(self.data)
return self.data
class SchemaError(Exception):
pass
openapi_spec = OpenAPISpec(OPENAPI_SPEC_PATH)
def get_openapi_fixture(endpoint: str, method: str,
response: Optional[str]='200') -> Dict[str, Any]:
"""Fetch a fixture from the full spec object.
"""
return (openapi_spec.spec()['paths'][endpoint][method.lower()]['responses']
[response]['content']['application/json']['schema']
['example'])
def get_openapi_parameters(endpoint: str,
method: str) -> List[Dict[str, Any]]:
return (openapi_spec.spec()['paths'][endpoint][method.lower()]['parameters'])
def validate_against_openapi_schema(content: Dict[str, Any], endpoint: str,
method: str, response: str) -> None:
"""Compare a "content" dict with the defined schema for a specific method
in an endpoint.
"""
schema = (openapi_spec.spec()['paths'][endpoint][method.lower()]['responses']
[response]['content']['application/json']['schema'])
exclusion_list = (EXCLUDE_PROPERTIES.get(endpoint, {}).get(method, {})
.get(response, []))
for key, value in content.items():
# Ignore in the validation the keys in EXCLUDE_PROPERTIES
if key in exclusion_list:
continue
# Check that the key is defined in the schema
if key not in schema['properties']:
raise SchemaError('Extraneous key "{}" in the response\'s '
'content'.format(key))
# Check that the types match
expected_type = to_python_type(schema['properties'][key]['type'])
actual_type = type(value)
if expected_type is not actual_type:
raise SchemaError('Expected type {} for key "{}", but actually '
'got {}'.format(expected_type, key, actual_type))
# Check that at least all the required keys are present
for req_key in schema['required']:
if req_key not in content.keys():
            raise SchemaError('Expected to find the "{}" required key'
                              .format(req_key))
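# Hedged usage sketch (endpoint, method, and payload are illustrative): in a
# test, validate a decoded JSON response against the spec and let SchemaError
# propagate on any mismatch.
#
#     content = json.loads(response.content)  # assumes `import json`
#     validate_against_openapi_schema(content, '/users/me', 'get', '200')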
def to_python_type(py_type: str) -> type:
"""Transform an OpenAPI-like type to a Pyton one.
https://swagger.io/docs/specification/data-models/data-types
"""
TYPES = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'array': list,
'object': dict
}
return TYPES[py_type]
| [
"str",
"str",
"str",
"str",
"str",
"Dict[str, Any]",
"str",
"str",
"str",
"str"
] | [
613,
2114,
2127,
2471,
2511,
2669,
2695,
2744,
2759,
4140
] | [
616,
2117,
2130,
2474,
2514,
2683,
2698,
2747,
2762,
4143
] |