class PlanningService(ProdApiService):
excluded_fields = {
"_id",
"item_class",
"flags",
"lock_user",
"lock_time",
"lock_session",
} | ProdApiService.excluded_fields
class EventsService(ProdApiService):
excluded_fields = {
"_id",
"lock_action",
"lock_user",
"lock_time",
"lock_session",
} | ProdApiService.excluded_fields
class AssignmentsService(ProdApiService):
excluded_fields = {
"lock_action",
"lock_user",
"lock_time",
} | ProdApiService.excluded_fields
class EventsHistoryService(ProdApiService):
excluded_fields = {
"update._etag",
"update._links",
"update._status",
"update._updated",
"update._created",
"update._id",
} | ProdApiService.excluded_fields
class EventsFilesService(ProdApiService):
pass
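# For illustration only (hypothetical subclass; 'internal_note' is a made-up
# field name): excluded_fields is a plain set, so each service extends the
# base exclusions with a set union:
#
#     class ExampleService(ProdApiService):
#         excluded_fields = {'internal_note'} | ProdApiService.excluded_fields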
#! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
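# For illustration: iskeyword() is just a membership test on the frozen
# keyword set, so it can be used directly as a predicate:
#
#     >>> iskeyword('pass')
#     True
#     >>> iskeyword('pasta')
#     False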
def main():
import sys, re
args = sys.argv[1:]
    iptfile = args[0] if args else "Python/graminit.c"
    optfile = args[1] if len(args) > 1 else "Lib/keyword.py"
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
lines.sort()
# load the output skeleton from the target
with open(optfile) as fp:
format = fp.readlines()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
    with open(optfile, 'w') as fp:
        fp.write(''.join(format))
if __name__ == "__main__":
main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This file is to be imported by every Shinken service component:
Arbiter, Scheduler, etc. It just checks for the main requirement of
Shinken.
"""
import sys
VERSION = "2.4.3"
# Make sure people are using Python 2.6 or higher
# This is the canonical python version check
if sys.version_info < (2, 6):
sys.exit("Shinken requires as a minimum Python 2.6.x, sorry")
elif sys.version_info >= (3,):
sys.exit("Shinken is not yet compatible with Python 3.x, sorry")
"""
Copyright (C) 2019 Quinn D Granfor
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import os
from os import walk # pylint: disable=C0412
from guessit import guessit
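# For illustration (keys vary with the filename and the output is abridged),
# guessit() parses a release name into structured fields:
#
#     >>> guessit('Big.Buck.Bunny.2008.1080p.mkv')
#     {'title': 'Big Buck Bunny', 'year': 2008, ...}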
media_files = 0
for root, dirs, files in walk('X:\\zz_movie'):
for file_name_loop in files:
filename, file_extension = os.path.splitext(file_name_loop)
if file_extension in ('.mkv', '.mp4', '.iso'):
guessit_name = guessit(file_name_loop)
if 'title' in guessit_name:
if 'year' in guessit_name:
media_files += 1
print(filename, ':',
guessit_name['title'] + ' (' + str(guessit_name['year']) + ')', flush=True)
user_answer = input('Should I rename/move it?')
if user_answer == 'y':
os.rename(os.path.join(root, file_name_loop), os.path.join(
'X:\\zz_movie', guessit_name['title']
+ ' (' + str(
guessit_name['year']) + ')' + file_extension))
# print(os.path.join(root, file_name_loop), flush=True)
else:
print(filename, flush=True)
print(root, flush=True)
guessit_name = guessit(root)
if 'title' in guessit_name:
if 'year' in guessit_name:
media_files += 1
print(root, ':',
guessit_name['title'] + ' (' + str(guessit_name['year']) + ')', flush=True)
user_answer = input('Should I rename/move it?')
if user_answer == 'y':
os.rename(os.path.join(root, filename + file_extension), os.path.join(
'X:\\zz_movie', guessit_name['title']
+ ' (' + str(
guessit_name['year']) + ')' + file_extension))
print(media_files, flush=True)
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Jason Swails, Matthew Harrigan
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, division
import numpy as np
import re, os, tempfile
from mdtraj.formats.pdb import pdbstructure
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.testing import eq
from mdtraj import load, load_pdb
from mdtraj import Topology
import pytest
import warnings
fd, temp = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
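# mkstemp() hands back an open OS-level descriptor along with the path; only
# the path is needed here, so the descriptor is closed right away and each
# test reopens `temp` on its own.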
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
os.unlink(temp)
def test_pdbread(get_fn):
pdb = get_fn('native.pdb')
p = load(pdb)
def test_pdbwrite(get_fn):
pdb = get_fn('native.pdb')
p = load(pdb)
p.save(temp)
r = load(temp)
eq(p.xyz, r.xyz)
def test_load_multiframe(get_fn):
with open(get_fn('multiframe.pdb')) as f:
pdb = PdbStructure(f)
assert eq(len(pdb.models), 2)
assert eq(len(pdb.models[0].chains), 1)
assert eq(len(pdb.models[0].chains[0].residues), 3)
assert eq(len(list(pdb.models[0].iter_atoms())), 22)
assert eq(len(pdb.models[1].chains), 1)
assert eq(len(pdb.models[1].chains[0].residues), 3)
assert eq(len(list(pdb.models[1].iter_atoms())), 22)
t = load(get_fn('multiframe.pdb'))
assert eq(t.n_frames, 2)
assert eq(t.n_atoms, 22)
assert eq(t.xyz[0], t.xyz[1])
def test_4ZUO(get_fn):
t = load(get_fn('4ZUO.pdb'))
eq(t.n_frames, 1)
eq(t.n_atoms, 6200)
# this is a random line from the file
#ATOM 1609 O GLY A 201 -25.423 13.774 -25.234 1.00 8.92 O
atom = list(t.top.atoms)[1525]
eq(atom.element.symbol, 'O')
eq(atom.residue.name, 'GLY')
eq(atom.index, 1525)
eq(t.xyz[0, 1525], np.array([-25.423, 13.774, -25.234]) / 10) # converting to NM
# this is atom 1577 in the PDB
#CONECT 1577 5518
#ATOM 1577 O HIS A 197 -18.521 9.724 -32.805 1.00 8.81 O
#HETATM 5518 K K A 402 -19.609 10.871 -35.067 1.00 9.11 K
atom = list(t.top.atoms)[1493]
eq(atom.name, 'O')
eq(atom.residue.name, 'HIS')
eq([(a1.index, a2.index) for a1, a2 in t.top.bonds if a1.index == 1493 or a2.index == 1493],
[(1492, 1493), (1493, 5129)])
# that first bond is from a conect record
def test_2EQQ_0(get_fn):
# this is an nmr structure with 20 models
t = load(get_fn('2EQQ.pdb'))
assert eq(t.n_frames, 20)
assert eq(t.n_atoms, 423)
assert eq(len(list(t.top.residues)), 28)
def test_1vii_solvated_with_ligand(get_fn):
traj = load(get_fn("1vii_sustiva_water.pdb"))
eq(len(list(traj.top.bonds)), 5156)
eq(len([bond for bond in traj.top.bonds if bond[0].residue.name == 'LIG']), 32)
traj.save(temp)
traj = load(temp)
eq(len(list(traj.top.bonds)), 5156)
eq(len([bond for bond in traj.top.bonds if bond[0].residue.name == 'LIG']), 32)
def test_write_large(get_fn):
traj = load(get_fn('native.pdb'))
traj.xyz.fill(123456789)
with pytest.raises(ValueError):
traj.save(temp)
def test_write_large_2(get_fn):
traj = load(get_fn('native.pdb'))
traj.xyz.fill(-123456789)
with pytest.raises(ValueError):
traj.save(temp)
def test_pdbstructure_0():
pdb_lines = [
"ATOM 188 N CYS A 42 40.714 -5.292 12.123 1.00 11.29 N ",
"ATOM 189 CA CYS A 42 39.736 -5.883 12.911 1.00 10.01 C ",
"ATOM 190 C CYS A 42 40.339 -6.654 14.087 1.00 22.28 C ",
"ATOM 191 O CYS A 42 41.181 -7.530 13.859 1.00 13.70 O ",
"ATOM 192 CB CYS A 42 38.949 -6.825 12.002 1.00 9.67 C ",
"ATOM 193 SG CYS A 42 37.557 -7.514 12.922 1.00 20.12 S "
]
res = pdbstructure.Residue("CYS", 42)
for l in pdb_lines:
res._add_atom(pdbstructure.Atom(l))
for i, atom in enumerate(res):
eq(pdb_lines[i], str(atom))
def test_pdbstructure_1():
pdb_lines = [
"ATOM 188 N CYS A 42 40.714 -5.292 12.123 1.00 11.29 N",
"ATOM 189 CA CYS A 42 39.736 -5.883 12.911 1.00 10.01 C",
"ATOM 190 C CYS A 42 40.339 -6.654 14.087 1.00 22.28 C",
"ATOM 191 O CYS A 42 41.181 -7.530 13.859 1.00 13.70 O",
"ATOM 192 CB CYS A 42 38.949 -6.825 12.002 1.00 9.67 C",
"ATOM 193 SG CYS A 42 37.557 -7.514 12.922 1.00 20.12 S"
]
positions = np.array([
[ 40.714, -5.292, 12.123],
[ 39.736, -5.883, 12.911],
[ 40.339, -6.654, 14.087],
[ 41.181, -7.53, 13.859],
[ 38.949, -6.825, 12.002],
[ 37.557, -7.514, 12.922]
])
res = pdbstructure.Residue("CYS", 42)
for l in pdb_lines:
res._add_atom(pdbstructure.Atom(l))
for i, c in enumerate(res.iter_positions()):
eq(c, positions[i])
def test_pdbstructure_2():
atom = pdbstructure.Atom("ATOM 2209 CB TYR A 299 6.167 22.607 20.046 1.00 8.12 C")
expected = np.array([6.167, 22.607, 20.046])
for i, c in enumerate(atom.iter_coordinates()):
eq(expected[i], c)
def test_pdbstructure_3():
loc = pdbstructure.Atom.Location(' ', [1,2,3], 1.0, 20.0, "XXX")
expected = [1, 2, 3]
for i, c in enumerate(loc):
eq(expected[i], c)
def test_pdb_from_url():
# load pdb from URL
t1 = load_pdb('http://www.rcsb.org/pdb/files/4ZUO.pdb.gz')
t2 = load_pdb('http://www.rcsb.org/pdb/files/4ZUO.pdb')
eq(t1.n_frames, 1)
eq(t2.n_frames, 1)
eq(t1.n_atoms, 6200)
eq(t2.n_atoms, 6200)
def test_3nch_conect(get_fn):
# This has conect entries that use all available digits, good failure case.
t1 = load_pdb(get_fn('3nch.pdb.gz'))
top, bonds = t1.top.to_dataframe()
bonds = dict(((a, b), 1) for (a, b, _, _) in bonds)
eq(bonds[19782, 19783], 1) # Check that last SO4 molecule has right bonds
eq(bonds[19782, 19784], 1) # Check that last SO4 molecule has right bonds
eq(bonds[19782, 19785], 1) # Check that last SO4 molecule has right bonds
eq(bonds[19782, 19786], 1) # Check that last SO4 molecule has right bonds
def test_3nch_serial_resSeq(get_fn):
# If you use zero-based indexing, this PDB has quite large gaps in residue and atom numbering, so it's a good test case. See #528
# Gold standard values obtained via
# cat 3nch.pdb |grep ATM|tail -n 5
# HETATM19787 S SO4 D 804 -4.788 -9.395 22.515 1.00121.87 S
# HETATM19788 O1 SO4 D 804 -3.815 -9.511 21.425 1.00105.97 O
# HETATM19789 O2 SO4 D 804 -5.989 -8.733 21.999 1.00116.13 O
# HETATM19790 O3 SO4 D 804 -5.130 -10.726 23.043 1.00108.74 O
# HETATM19791 O4 SO4 D 804 -4.210 -8.560 23.575 1.00112.54 O
t1 = load_pdb(get_fn('3nch.pdb.gz'))
top, bonds = t1.top.to_dataframe()
top2 = Topology.from_dataframe(top, bonds)
eq(t1.top, top2)
top = top.set_index('serial') # Index by the actual data in the PDB
    eq(str(top.loc[19791]["name"]), "O4")
    eq(str(top.loc[19787]["name"]), "S")
    eq(str(top.loc[19787]["resName"]), "SO4")
    eq(int(top.loc[19787]["resSeq"]), 804)
def test_1ncw(get_fn):
t1 = load_pdb(get_fn('1ncw.pdb.gz'))
def test_1vii_url_and_gz(get_fn):
t1 = load_pdb('http://www.rcsb.org/pdb/files/1vii.pdb.gz')
t2 = load_pdb('http://www.rcsb.org/pdb/files/1vii.pdb')
t3 = load_pdb(get_fn('1vii.pdb.gz'))
t4 = load_pdb(get_fn('1vii.pdb'))
eq(t1.n_frames, 1)
eq(t1.n_frames, t2.n_frames)
eq(t1.n_frames, t3.n_frames)
eq(t1.n_frames, t4.n_frames)
eq(t1.n_atoms, t2.n_atoms)
eq(t1.n_atoms, t3.n_atoms)
eq(t1.n_atoms, t4.n_atoms)
def test_segment_id(get_fn):
pdb = load_pdb(get_fn('ala_ala_ala.pdb'))
pdb.save_pdb(temp)
pdb2 = load_pdb(temp)
correct_segment_id = 'AAL'
# check that all segment ids are set correctly
for ridx,r in enumerate(pdb.top.residues):
assert r.segment_id == correct_segment_id, "residue %i (0-indexed) does not have segment_id set correctly from ala_ala_ala.pdb"%(ridx)
# check that all segment ids are set correctly after a new pdb file is written
for ridx,(r1,r2) in enumerate(zip(pdb.top.residues,pdb2.top.residues)):
        assert r1.segment_id == r2.segment_id, "segment_id of residue %i (0-indexed) in ala_ala_ala.pdb does not agree with its value after being written out to a new pdb file" % (ridx)
def test_bfactors(get_fn):
pdb = load_pdb(get_fn('native.pdb'))
bfactors0 = np.arange(pdb.n_atoms) / 2.0 - 4.0 # (Get some decimals..)
pdb.save_pdb(temp, bfactors=bfactors0)
with open(temp, 'r') as fh:
atom_lines = [line for line in fh.readlines() if re.search(r'^ATOM', line)]
str_bfactors1 = [l[60:66] for l in atom_lines]
flt_bfactors1 = np.array([float(i) for i in str_bfactors1])
# check formatting has a space at the beginning and not at the end
frmt = np.array([(s[0] == ' ') and (s[-1] != ' ') for s in str_bfactors1])
assert np.all(frmt)
# make sure the numbers are actually the same
eq(bfactors0, flt_bfactors1)
def test_hex(get_fn):
pdb = load_pdb(get_fn('water_hex.pdb.gz'))
assert pdb.n_atoms == 100569
assert pdb.n_residues == 33523
pdb.save(temp)
def test_dummy_pdb_box_detection(get_fn):
with warnings.catch_warnings(record=True) as war:
traj = load(get_fn('2koc.pdb'))
assert 'Unlikely unit cell' in str(war[0].message)
assert traj.unitcell_lengths is None, 'Expected dummy box to be deleted'
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
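# (On Python 2.5 and later the hashlib branch above is always taken; the md5
# fallback only matters for 2.4.)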
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
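# These are Visual Studio's fixed entry-type GUIDs: the 'project' value marks
# an ordinary project entry in the .sln file, the 'folder' value a solution
# folder.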
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
    A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can
  explicitly determine the GUID to refer to. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
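# For illustration (the target name below is made up): MakeGuid is pure, so
# repeated calls agree and different seeds diverge:
#
#     MakeGuid('base_unittests') == MakeGuid('base_unittests')        # True
#     MakeGuid('base_unittests') == MakeGuid('base_unittests', 'x')   # False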
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
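# Note: __cmp__ is Python 2 only (which is what gyp targets here); a Python 3
# port would define __lt__/__eq__ or use functools.total_ordering instead.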
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
  def __init__(self, path, name=None, entries=None, guid=None, items=None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
  def __init__(self, path, name=None, dependencies=None, guid=None,
               spec=None, build_file=None, config_platform_overrides=None,
               fixpath_prefix=None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
      config_platform_overrides: optional dict of configuration platforms to
        be used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
      variants: List of build variant strings. If None, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
# Copyright (C) 2001-2007 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import os
import sys
import time
import base64
import difflib
import unittest
import warnings
from cStringIO import StringIO
import email
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.test_support import findfile, run_unittest
from email.test import __file__ as landmark
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
def openfile(filename, mode='r'):
path = os.path.join(os.path.dirname(landmark), 'data', filename)
return open(path, mode)
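# `landmark` is the email.test package's __init__ file, so data files resolve
# relative to the package no matter where the suite is launched from.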
# Base test class
class TestEmailBase(unittest.TestCase):
def ndiffAssertEqual(self, first, second):
"""Like assertEqual except use ndiff for readable output."""
if first != second:
sfirst = str(first)
ssecond = str(second)
diff = difflib.ndiff(sfirst.splitlines(), ssecond.splitlines())
fp = StringIO()
print >> fp, NL, NL.join(diff)
raise self.failureException, fp.getvalue()
def _msgobj(self, filename):
fp = openfile(findfile(filename))
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
        # Try adding a charset when there are already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), 'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), 'foo')
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
fp = openfile('msg_17.txt')
try:
text = fp.read()
finally:
fp.close()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertTrue('from' in msg)
self.assertTrue('From' in msg)
self.assertTrue('FROM' in msg)
self.assertTrue('to' in msg)
self.assertTrue('To' in msg)
self.assertTrue('TO' in msg)
def test_as_string(self):
eq = self.assertEqual
msg = self._msgobj('msg_01.txt')
fp = openfile('msg_01.txt')
try:
# BAW 30-Mar-2009 Evil be here. So, the generator is broken with
# respect to long line breaking. It's also not idempotent when a
# header from a parsed message is continued with tabs rather than
            # spaces. Before we fixed bug 1974 it was broken the other way,
# i.e. headers that were continued with spaces got continued with
# tabs. For Python 2.x there's really no good fix and in Python
# 3.x all this stuff is re-written to be right(er). Chris Withers
# convinced me that using space as the default continuation
# character is less bad for more applications.
text = fp.read().replace('\t', ' ')
finally:
fp.close()
self.ndiffAssertEqual(text, msg.as_string())
fullrepr = str(msg)
lines = fullrepr.split('\n')
self.assertTrue(lines[0].startswith('From '))
eq(text, NL.join(lines[1:]))
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
def test_has_key(self):
msg = email.message_from_string('Header: exists')
self.assertTrue(msg.has_key('header'))
self.assertTrue(msg.has_key('Header'))
self.assertTrue(msg.has_key('HEADER'))
self.assertFalse(msg.has_key('headeri'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
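        # The padding is invalid, so base64 decoding fails and
        # get_payload(decode=True) falls back to the raw payload.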
self.assertEqual(msg.get_payload(decode=True), x)
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
        # With no explicit _charset it's us-ascii, and all are 7-bit
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8-bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], '8bit')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
def test_split_long_continuation(self):
eq = self.ndiffAssertEqual
msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text
test
""")
def test_another_long_almost_unsplittable_header(self):
eq = self.ndiffAssertEqual
hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
h = Header(hstr)
eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")
def test_long_nonstring(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g, header_name='Subject')
h.append(cz_head, cz)
h.append(utf8_head, utf8)
msg = Message()
msg['Subject'] = h
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=
""")
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=""")
def test_long_header_encode(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit',
continuation_ws='\t')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_splitter(self):
eq = self.ndiffAssertEqual
msg = MIMEText('')
# It'd be great if we could use add_header() here, but that doesn't
# guarantee an order of the parameters.
msg['X-Foobar-Spoink-Defrobnit'] = (
'wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')
def test_no_semis_header_splitter(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = SPACE.join(['<%d@dom.ain>' % i for i in range(10)])
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
<5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
Test""")
def test_no_split_long_header(self):
eq = self.ndiffAssertEqual
hstr = 'References: ' + 'x' * 80
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
References: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
def test_splitting_multiple_long_lines(self):
eq = self.ndiffAssertEqual
hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for ; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for ; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for ; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor ;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor ;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor ;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
eq = self.ndiffAssertEqual
hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
h = Header(hstr, maxlinelen=78, header_name='Received',
continuation_ws='\t')
eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
\thelo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
eq = self.ndiffAssertEqual
msg = Message()
h = Header('Britische Regierung gibt', 'iso-8859-1',
header_name='Subject')
h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
msg['Subject'] = h
eq(msg.as_string(), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt?= =?iso-8859-1?q?gr=FCnes?=
=?iso-8859-1?q?_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")
def test_long_8bit_header_no_charset(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['Reply-To'] = 'Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte '
eq(msg.as_string(), """\
Reply-To: Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte
""")
def test_long_to_header(self):
eq = self.ndiffAssertEqual
to = '"Someone Test #A" ,,"Someone Test #B" , "Someone Test #C" , "Someone Test #D" '
msg = Message()
msg['To'] = to
eq(msg.as_string(0), '''\
To: "Someone Test #A" , ,
"Someone Test #B" ,
"Someone Test #C" ,
"Someone Test #D"
''')
def test_long_line_after_append(self):
eq = self.ndiffAssertEqual
s = 'This is an example of string which has almost the limit of header length.'
h = Header(s)
h.append('Add another line.')
eq(h.encode(), """\
This is an example of string which has almost the limit of header length.
Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
eq = self.ndiffAssertEqual
fn = 'X-Very-Very-Very-Long-Header-Name'
gs = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
h = Header(gs, 'iso-8859-1', header_name=fn)
# BAW: this seems broken because the first line is too long
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_?=
=?iso-8859-1?q?ein_werden_mit_einem_Foerderband_komfortabel_den_Korridor_?=
=?iso-8859-1?q?entlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_g?=
=?iso-8859-1?q?egen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
h = 'from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; Wed, 05 Mar 2003 18:10:18 -0700'
msg = Message()
msg['Received-1'] = Header(h, continuation_ws='\t')
msg['Received-2'] = h
self.ndiffAssertEqual(msg.as_string(), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
\tWed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
""")
def test_string_headerinst_eq(self):
h = '<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David Bremner\'s message of "Thu, 6 Mar 2003 13:58:21 +0100")'
msg = Message()
msg['Received'] = Header(h, header_name='Received-1',
continuation_ws='\t')
msg['Received'] = h
self.ndiffAssertEqual(msg.as_string(), """\
Received: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
Received: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
""")
def test_long_unbreakable_lines_with_continuation(self):
eq = self.ndiffAssertEqual
msg = Message()
t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
msg['Face-1'] = t
msg['Face-2'] = Header(t, header_name='Face-2')
eq(msg.as_string(), """\
Face-1: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")
def test_another_long_multiline_header(self):
eq = self.ndiffAssertEqual
m = '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with Microsoft SMTPSVC(5.0.2195.4905);
Wed, 16 Oct 2002 07:41:11 -0700'''
msg = email.message_from_string(m)
eq(msg.as_string(), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')
def test_long_lines_with_different_header(self):
eq = self.ndiffAssertEqual
h = """\
List-Unsubscribe: ,
"""
msg = Message()
msg['List'] = h
msg['List'] = Header(h, header_name='List')
self.ndiffAssertEqual(msg.as_string(), """\
List: List-Unsubscribe: ,
List: List-Unsubscribe: ,
""")
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
def setUp(self):
self.msg = Message()
self.msg['From'] = 'aaa@bbb.org'
self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangled_from(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
>From the desk of A.A.A.:
Blah blah blah
""")
def test_dont_mangle_from(self):
s = StringIO()
g = Generator(s, mangle_from_=False)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
From the desk of A.A.A.:
Blah blah blah
""")
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
def setUp(self):
# Make sure we pick up the audiotest.au that lives in email/test/data.
# In Python, there's an audiotest.au living in Lib/test but that isn't
# included in some binary distros that don't include the test
# package. The trailing empty string on the .join() is significant
# since findfile() will do a dirname().
datadir = os.path.join(os.path.dirname(landmark), 'data', '')
fp = open(findfile('audiotest.au', datadir), 'rb')
try:
self._audiodata = fp.read()
finally:
fp.close()
self._au = MIMEAudio(self._audiodata)
def test_guess_minor_type(self):
self.assertEqual(self._au.get_content_type(), 'audio/basic')
def test_encoding(self):
payload = self._au.get_payload()
self.assertEqual(base64.decodestring(payload), self._audiodata)
def test_checkSetMinor(self):
au = MIMEAudio(self._audiodata, 'fish')
self.assertEqual(au.get_content_type(), 'audio/fish')
def test_add_header(self):
eq = self.assertEqual
unless = self.assertTrue
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
'attachment; filename="audiotest.au"')
eq(self._au.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'audiotest.au')])
eq(self._au.get_param('filename', header='content-disposition'),
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
unless(self._au.get_param('foo', failobj=missing,
header='content-disposition') is missing)
# Try some missing stuff
unless(self._au.get_param('foobar', missing) is missing)
unless(self._au.get_param('attachment', missing,
header='foobar') is missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
self._imgdata = fp.read()
finally:
fp.close()
self._im = MIMEImage(self._imgdata)
def test_guess_minor_type(self):
self.assertEqual(self._im.get_content_type(), 'image/gif')
def test_encoding(self):
payload = self._im.get_payload()
self.assertEqual(base64.decodestring(payload), self._imgdata)
def test_checkSetMinor(self):
im = MIMEImage(self._imgdata, 'fish')
self.assertEqual(im.get_content_type(), 'image/fish')
def test_add_header(self):
eq = self.assertEqual
unless = self.assertTrue
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
'attachment; filename="dingusfish.gif"')
eq(self._im.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'dingusfish.gif')])
eq(self._im.get_param('filename', header='content-disposition'),
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
unless(self._im.get_param('foo', failobj=missing,
header='content-disposition') is missing)
# Try some missing stuff
unless(self._im.get_param('foobar', missing) is missing)
unless(self._im.get_param('attachment', missing,
header='foobar') is missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
def test_headers(self):
eq = self.assertEqual
msg = MIMEApplication('\xfa\xfb\xfc\xfd\xfe\xff')
eq(msg.get_content_type(), 'application/octet-stream')
eq(msg['content-transfer-encoding'], 'base64')
def test_body(self):
eq = self.assertEqual
bytes = '\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytes)
eq(msg.get_payload(), '+vv8/f7/')
eq(msg.get_payload(decode=True), bytes)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
def setUp(self):
self._msg = MIMEText('hello there')
def test_types(self):
eq = self.assertEqual
unless = self.assertTrue
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
unless(self._msg.get_param('foobar', missing) is missing)
unless(self._msg.get_param('charset', missing, header='foobar')
is missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
self.assertTrue(not self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
msg = MIMEText('hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
data = fp.read()
finally:
fp.close()
container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
image = MIMEImage(data, name='dingusfish.gif')
image.add_header('content-disposition', 'attachment',
filename='dingusfish.gif')
intro = MIMEText('''\
Hi there,
This is the dingus fish.
''')
container.attach(intro)
container.attach(image)
container['From'] = 'Barry <barry@digicool.com>'
container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
container['Subject'] = 'Here is your dingus fish'
now = 987809702.54848599
timetuple = time.localtime(now)
if timetuple[-1] == 0:
tzsecs = time.timezone
else:
tzsecs = time.altzone
if tzsecs > 0:
sign = '-'
else:
sign = '+'
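# Note (added comment): floor-dividing seconds by 36 turns e.g. 18000
# seconds (5 hours) into 500, i.e. the HHMM-style value a '+0500'/'-0500'
# numeric timezone offset needs, for whole-hour offsets.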
tzoffset = ' %s%04d' % (sign, tzsecs // 36)
container['Date'] = time.strftime(
'%a, %d %b %Y %H:%M:%S',
time.localtime(now)) + tzoffset
self._msg = container
self._im = image
self._txt = intro
def test_hierarchy(self):
# convenience
eq = self.assertEqual
unless = self.assertTrue
raises = self.assertRaises
# tests
m = self._msg
unless(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
unless(m0 is self._txt)
unless(m1 is self._im)
eq(m.get_payload(), [m0, m1])
unless(not m0.is_multipart())
unless(not m1.is_multipart())
def test_empty_multipart_idempotent(self):
text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
"""
msg = Parser().parsestr(text)
self.ndiffAssertEqual(text, msg.as_string())
def test_no_parts_in_a_multipart_with_none_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
outer.epilogue = ''
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')
def test_one_part_in_a_multipart(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
msg = MIMEText('hello world')
outer.attach(msg)
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = '\n'
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_message_external_body(self):
eq = self.assertEqual
msg = self._msgobj('msg_36.txt')
eq(len(msg.get_payload()), 2)
msg1 = msg.get_payload(1)
eq(msg1.get_content_type(), 'multipart/alternative')
eq(len(msg1.get_payload()), 2)
for subpart in msg1.get_payload():
eq(subpart.get_content_type(), 'message/external-body')
eq(len(subpart.get_payload()), 1)
subsubpart = subpart.get_payload(0)
eq(subsubpart.get_content_type(), 'text/plain')
def test_double_boundary(self):
# msg_37.txt is a multipart that contains two dash-boundary lines in a
# row. Our interpretation of RFC 2046 calls for ignoring the second
# and subsequent boundaries.
msg = self._msgobj('msg_37.txt')
self.assertEqual(len(msg.get_payload()), 3)
def test_nested_inner_contains_outer_boundary(self):
eq = self.ndiffAssertEqual
# msg_38.txt has an inner part that contains outer boundaries. My
# interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) says
# these are illegal and should be interpreted as unterminated inner
# parts.
msg = self._msgobj('msg_38.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")
def test_nested_with_same_boundary(self):
eq = self.ndiffAssertEqual
# msg_39.txt is similarly evil in that it's got inner parts that use
# the same boundary as outer parts. Again, I believe the way this is
# parsed is closest to the spirit of RFC 2046.
msg = self._msgobj('msg_39.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")
def test_boundary_in_non_multipart(self):
msg = self._msgobj('msg_40.txt')
self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"
----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit
----961284236552522269--
''')
def test_boundary_with_leading_space(self):
eq = self.assertEqual
msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"
-- XXXX
Content-Type: text/plain
-- XXXX
Content-Type: text/plain
-- XXXX--
''')
self.assertTrue(msg.is_multipart())
eq(msg.get_boundary(), ' XXXX')
eq(len(msg.get_payload()), 2)
def test_boundary_without_trailing_newline(self):
m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0
--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64
YXNkZg==
--===============0012394164==--""")
self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
def test_same_boundary_inner_outer(self):
unless = self.assertTrue
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
unless(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
unless(isinstance(inner.defects[0],
errors.StartBoundaryNotFoundDefect))
def test_multipart_no_boundary(self):
unless = self.assertTrue
msg = self._msgobj('msg_25.txt')
unless(isinstance(msg.get_payload(), str))
self.assertEqual(len(msg.defects), 2)
unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
unless(isinstance(msg.defects[1],
errors.MultipartInvariantViolationDefect))
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = Message()
# RFC 2045, $5.2 says invalid yields text/plain
msg['Content-Type'] = 'text'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Clear the old value and try something /really/ invalid
del msg['content-type']
msg['Content-Type'] = 'foo'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Still, make sure that the message is idempotently generated
s = StringIO()
g = Generator(s)
g.flatten(msg)
neq(s.getvalue(), 'Content-Type: foo\n\n')
def test_no_start_boundary(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_31.txt')
eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain
message 1
--BOUNDARY
Content-Type: text/plain
message 2
--BOUNDARY--
""")
def test_no_separating_blank_line(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_35.txt')
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting
counter to RFC 2822, there's no separating newline here
""")
def test_lying_multipart(self):
unless = self.assertTrue
msg = self._msgobj('msg_41.txt')
unless(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
unless(isinstance(msg.defects[1],
errors.MultipartInvariantViolationDefect))
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
# The message structure is:
#
# multipart/mixed
# text/plain
# message/rfc822
# multipart/mixed [*]
#
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
self.assertTrue(isinstance(bad.defects[0],
errors.StartBoundaryNotFoundDefect))
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
m = ' Line 1\nLine 2\nLine 3'
msg = email.message_from_string(m)
eq(msg.keys(), [])
eq(msg.get_payload(), 'Line 2\nLine 3')
eq(len(msg.defects), 1)
self.assertTrue(isinstance(msg.defects[0],
errors.FirstHeaderLineIsContinuationDefect))
eq(msg.defects[0].line, ' Line 1\n')
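# Sketch (not in the original file): the parser is deliberately lenient, so
# instead of raising it records problems on msg.defects, as the tests above
# check; client code can inspect them like this.
def _demo_list_defects(text):
    import email
    msg = email.message_from_string(text)
    return [defect.__class__.__name__ for defect in msg.defects]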
# Test RFC 2047 header encoding and decoding
class TestRFC2047(unittest.TestCase):
def test_rfc2047_multiline(self):
eq = self.assertEqual
s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
dh = decode_header(s)
eq(dh, [
('Re:', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland'),
('baz foo bar', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland')])
eq(str(make_header(dh)),
"""Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar
=?mac-iceland?q?r=8Aksm=9Arg=8Cs?=""")
def test_whitespace_eater_unicode(self):
eq = self.assertEqual
s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
dh = decode_header(s)
eq(dh, [('Andr\xe9', 'iso-8859-1'), ('Pirard <pirard@dom.ain>', None)])
hu = unicode(make_header(dh)).encode('latin-1')
eq(hu, 'Andr\xe9 Pirard <pirard@dom.ain>')
def test_whitespace_eater_unicode_2(self):
eq = self.assertEqual
s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
dh = decode_header(s)
eq(dh, [('The', None), ('quick brown fox', 'iso-8859-1'),
('jumped over the', None), ('lazy dog', 'iso-8859-1')])
hu = make_header(dh).__unicode__()
eq(hu, u'The quick brown fox jumped over the lazy dog')
def test_rfc2047_missing_whitespace(self):
s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
dh = decode_header(s)
self.assertEqual(dh, [(s, None)])
def test_rfc2047_with_whitespace(self):
s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
dh = decode_header(s)
self.assertEqual(dh, [('Sm', None), ('\xf6', 'iso-8859-1'),
('rg', None), ('\xe5', 'iso-8859-1'),
('sbord', None)])
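# Sketch (not in the original file): decode_header() splits a raw RFC 2047
# header into (decoded_bytes, charset) chunks and make_header() reassembles
# them, which is the round trip the tests above rely on.
def _demo_rfc2047_roundtrip(raw):
    from email.header import decode_header, make_header
    chunks = decode_header(raw)      # e.g. [('Andr\xe9', 'iso-8859-1'), ...]
    return str(make_header(chunks))  # re-encoded header string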
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
def setUp(self):
fp = openfile('msg_11.txt')
try:
self._text = fp.read()
finally:
fp.close()
def test_type_error(self):
self.assertRaises(TypeError, MIMEMessage, 'a plain string')
def test_valid_argument(self):
eq = self.assertEqual
unless = self.assertTrue
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subpart = payload[0]
unless(subpart is m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
eq = self.assertEqual
msg1 = Message()
msg1['Subject'] = 'subpart 1'
msg2 = Message()
msg2['Subject'] = 'subpart 2'
r = MIMEMessage(msg1)
self.assertRaises(errors.MultipartConversionError, r.attach, msg2)
def test_generate(self):
# First craft the message to be encapsulated
m = Message()
m['Subject'] = 'An enclosed message'
m.set_payload('Here is the body of the message.\n')
r = MIMEMessage(m)
r['Subject'] = 'The enclosing message'
s = StringIO()
g = Generator(s)
g.flatten(r)
self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message
Subject: An enclosed message
Here is the body of the message.
""")
def test_parse_message_rfc822(self):
eq = self.assertEqual
unless = self.assertTrue
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
submsg = payload[0]
self.assertTrue(isinstance(submsg, Message))
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
def test_dsn(self):
eq = self.assertEqual
unless = self.assertTrue
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
unless(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
eq(subpart.get_content_type(), 'text/plain')
eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry"
To: SoCal Raves
Subject: [scr] yeah for Ians!!
Your message cannot be delivered to the following recipients:
Recipient address: jangel1@cougar.noc.ucla.edu
Reason: recipient reached disk quota
""")
# Subpart 2 contains the machine parsable DSN information. It
# consists of two blocks of headers, represented by two nested Message
# objects.
subpart = msg.get_payload(1)
eq(subpart.get_content_type(), 'message/delivery-status')
eq(len(subpart.get_payload()), 2)
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
unless(isinstance(dsn1, Message))
eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
unless(isinstance(dsn2, Message))
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
eq(dsn2.get_param('rfc822', header='final-recipient'), '')
# Subpart 3 is the original message
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
subsubpart = payload[0]
unless(isinstance(subsubpart, Message))
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
def test_epilogue(self):
eq = self.ndiffAssertEqual
fp = openfile('msg_21.txt')
try:
text = fp.read()
finally:
fp.close()
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = 'End of MIME message\n'
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), text)
def test_no_nl_preamble(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = ''
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Two
--BOUNDARY--
""")
def test_default_type(self):
eq = self.assertEqual
fp = openfile('msg_30.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
eq = self.assertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# Set up container
container = MIMEMultipart('digest', 'BOUNDARY')
container.epilogue = ''
# Set up subparts
subpart1a = MIMEText('message 1\n')
subpart2a = MIMEText('message 2\n')
subpart1 = MIMEMessage(subpart1a)
subpart2 = MIMEMessage(subpart2a)
container.attach(subpart1)
container.attach(subpart2)
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
del subpart1['content-type']
del subpart1['mime-version']
del subpart2['content-type']
del subpart2['mime-version']
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
def test_mime_attachments_in_constructor(self):
eq = self.assertEqual
text1 = MIMEText('')
text2 = MIMEText('')
msg = MIMEMultipart(_subparts=(text1, text2))
eq(len(msg.get_payload()), 2)
eq(msg.get_payload(0), text1)
eq(msg.get_payload(1), text2)
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note that we ignore the Unix-From since that may
# contain a changed date.
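# A minimal sketch of that round trip (maxheaderlen=0 disables header
# wrapping so the regenerated text can match the input byte-for-byte):
def _demo_roundtrip_is_identity(text):
    import email
    from cStringIO import StringIO
    from email.generator import Generator
    out = StringIO()
    Generator(out, maxheaderlen=0).flatten(email.message_from_string(text))
    return out.getvalue() == text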
class TestIdempotent(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
data = fp.read()
finally:
fp.close()
msg = email.message_from_string(data)
return msg, data
def _idempotent(self, msg, text):
eq = self.ndiffAssertEqual
s = StringIO()
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
eq(text, s.getvalue())
def test_parse_text_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_01.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_params()[1], ('charset', 'us-ascii'))
eq(msg.get_param('charset'), 'us-ascii')
eq(msg.preamble, None)
eq(msg.epilogue, None)
self._idempotent(msg, text)
def test_parse_untyped_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_03.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_params(), None)
eq(msg.get_param('charset'), None)
self._idempotent(msg, text)
def test_simple_multipart(self):
msg, text = self._msgobj('msg_04.txt')
self._idempotent(msg, text)
def test_MIME_digest(self):
msg, text = self._msgobj('msg_02.txt')
self._idempotent(msg, text)
def test_long_header(self):
msg, text = self._msgobj('msg_27.txt')
self._idempotent(msg, text)
def test_MIME_digest_with_part_headers(self):
msg, text = self._msgobj('msg_28.txt')
self._idempotent(msg, text)
def test_mixed_with_image(self):
msg, text = self._msgobj('msg_06.txt')
self._idempotent(msg, text)
def test_multipart_report(self):
msg, text = self._msgobj('msg_05.txt')
self._idempotent(msg, text)
def test_dsn(self):
msg, text = self._msgobj('msg_16.txt')
self._idempotent(msg, text)
def test_preamble_epilogue(self):
msg, text = self._msgobj('msg_21.txt')
self._idempotent(msg, text)
def test_multipart_one_part(self):
msg, text = self._msgobj('msg_23.txt')
self._idempotent(msg, text)
def test_multipart_no_parts(self):
msg, text = self._msgobj('msg_24.txt')
self._idempotent(msg, text)
def test_no_start_boundary(self):
msg, text = self._msgobj('msg_31.txt')
self._idempotent(msg, text)
def test_rfc2231_charset(self):
msg, text = self._msgobj('msg_32.txt')
self._idempotent(msg, text)
def test_more_rfc2231_parameters(self):
msg, text = self._msgobj('msg_33.txt')
self._idempotent(msg, text)
def test_text_plain_in_a_multipart_digest(self):
msg, text = self._msgobj('msg_34.txt')
self._idempotent(msg, text)
def test_nested_multipart_mixeds(self):
msg, text = self._msgobj('msg_12a.txt')
self._idempotent(msg, text)
def test_message_external_body_idempotent(self):
msg, text = self._msgobj('msg_36.txt')
self._idempotent(msg, text)
def test_content_type(self):
eq = self.assertEqual
unless = self.assertTrue
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
# Test the Content-Type: parameters
params = {}
for pk, pv in msg.get_params():
params[pk] = pv
eq(params['report-type'], 'delivery-status')
eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
eq(msg.preamble, 'This is a MIME-encapsulated message.\n')
eq(msg.epilogue, '\n')
eq(len(msg.get_payload()), 3)
# Make sure the subparts are what we expect
msg1 = msg.get_payload(0)
eq(msg1.get_content_type(), 'text/plain')
eq(msg1.get_payload(), 'Yadda yadda yadda\n')
msg2 = msg.get_payload(1)
eq(msg2.get_content_type(), 'text/plain')
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
self.assertTrue(isinstance(msg3, Message))
payload = msg3.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
msg4 = payload[0]
unless(isinstance(msg4, Message))
eq(msg4.get_payload(), 'Yadda yadda yadda\n')
def test_parser(self):
eq = self.assertEqual
unless = self.assertTrue
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
unless(isinstance(payload, list))
eq(len(payload), 1)
msg1 = payload[0]
self.assertTrue(isinstance(msg1, Message))
eq(msg1.get_content_type(), 'text/plain')
self.assertTrue(isinstance(msg1.get_payload(), str))
eq(msg1.get_payload(), '\n')
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
fp.seek(0)
msg = email.message_from_file(fp)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
finally:
fp.close()
def test_message_from_string_with_class(self):
unless = self.assertTrue
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
# Create a subclass
class MyMessage(Message):
pass
msg = email.message_from_string(text, MyMessage)
unless(isinstance(msg, MyMessage))
# Try something more complicated
fp = openfile('msg_02.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
unless(isinstance(subpart, MyMessage))
def test_message_from_file_with_class(self):
unless = self.assertTrue
# Create a subclass
class MyMessage(Message):
pass
fp = openfile('msg_01.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
unless(isinstance(msg, MyMessage))
# Try something more complicated
fp = openfile('msg_02.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
for subpart in msg.walk():
unless(isinstance(subpart, MyMessage))
def test__all__(self):
module = __import__('email')
# Can't use sorted() here due to Python 2.3 compatibility
all = module.__all__[:]
all.sort()
self.assertEqual(all, [
# Old names
'Charset', 'Encoders', 'Errors', 'Generator',
'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
'MIMENonMultipart', 'MIMEText', 'Message',
'Parser', 'Utils', 'base64MIME',
# new names
'base64mime', 'charset', 'encoders', 'errors', 'generator',
'header', 'iterators', 'message', 'message_from_file',
'message_from_string', 'mime', 'parser',
'quopriMIME', 'quoprimime', 'utils',
])
def test_formatdate(self):
now = time.time()
self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
time.gmtime(now)[:6])
def test_formatdate_localtime(self):
now = time.time()
self.assertEqual(
utils.parsedate(utils.formatdate(now, localtime=True))[:6],
time.localtime(now)[:6])
def test_formatdate_usegmt(self):
now = time.time()
self.assertEqual(
utils.formatdate(now, localtime=False),
time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
self.assertEqual(
utils.formatdate(now, localtime=False, usegmt=True),
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
def test_parsedate_none(self):
self.assertEqual(utils.parsedate(''), None)
def test_parsedate_compact(self):
# The FWS after the comma is optional
self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
(2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup)), 2003)
timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup[:9]))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_parseaddr_empty(self):
self.assertEqual(utils.parseaddr('<>'), ('', ''))
self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
def test_noquote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly Person', 'person@dom.ain')),
'A Silly Person <person@dom.ain>')
def test_escape_dump(self):
self.assertEqual(
utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
r'"A \(Very\) Silly Person" ')
a = r'A \(Special\) Person'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_escape_backslashes(self):
self.assertEqual(
utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
r'"Arthur \\Backslash\\ Foobar" ')
a = r'Arthur \Backslash\ Foobar'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_name_with_dot(self):
x = 'John X. Doe <jxd@example.com>'
y = '"John X. Doe" <jxd@example.com>'
a, b = ('John X. Doe', 'jxd@example.com')
self.assertEqual(utils.parseaddr(x), (a, b))
self.assertEqual(utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(utils.formataddr((a, b)), y)
def test_multiline_from_comment(self):
x = """\
Foo
\tBar <foo@example.com>"""
self.assertEqual(utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))
def test_quote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly; Person', 'person@dom.ain')),
r'"A Silly; Person" ')
def test_fix_eols(self):
eq = self.assertEqual
eq(utils.fix_eols('hello'), 'hello')
eq(utils.fix_eols('hello\n'), 'hello\r\n')
eq(utils.fix_eols('hello\r'), 'hello\r\n')
eq(utils.fix_eols('hello\r\n'), 'hello\r\n')
eq(utils.fix_eols('hello\n\r'), 'hello\r\n\r\n')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.assertNotEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
eq = self.assertEqual
eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
'Bud Person <bperson@dom.ain>']),
[('Al Person', 'aperson@dom.ain'),
('Bud Person', 'bperson@dom.ain')])
def test_getaddresses_nasty(self):
eq = self.assertEqual
eq(utils.getaddresses(['foo: ;']), [('', '')])
eq(utils.getaddresses(
['[]*-- =~$']),
[('', ''), ('', ''), ('', '*--')])
eq(utils.getaddresses(
['foo: ;', '"Jason R. Mastaler" ']),
[('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])
def test_getaddresses_embedded_comment(self):
"""Test proper handling of a nested comment"""
eq = self.assertEqual
addrs = utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
eq(addrs[0][1], 'foo@bar.com')
def test_utils_quote_unquote(self):
eq = self.assertEqual
msg = Message()
msg.add_header('content-disposition', 'attachment',
filename='foo\\wacky"name')
eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
charset = Charset('not a charset')
self.assertEqual(charset.get_body_encoding(), 'base64')
def test_get_body_encoding_with_uppercase_charset(self):
eq = self.assertEqual
msg = Message()
msg['Content-Type'] = 'text/plain; charset=UTF-8'
eq(msg['content-type'], 'text/plain; charset=UTF-8')
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'utf-8')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), 'base64')
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
eq(msg.get_payload(decode=True), 'hello world')
eq(msg['content-transfer-encoding'], 'base64')
# Try another one
msg = Message()
msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'us-ascii')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), encoders.encode_7or8bit)
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'hello world')
eq(msg['content-transfer-encoding'], '7bit')
def test_charsets_case_insensitive(self):
lc = Charset('us-ascii')
uc = Charset('US-ASCII')
self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
def test_partial_falls_inside_message_delivery_status(self):
eq = self.ndiffAssertEqual
# The Parser interface provides chunks of data to FeedParser in 8192
# byte gulps. SF bug #1076485 found one of those chunks inside
# a message/delivery-status header block, which triggered an
# unreadline() of NeedMoreData.
msg = self._msgobj('msg_43.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")
# Test the iterator/generators
class TestIterators(TestEmailBase):
def test_body_line_iterator(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# First a simple non-multipart message
msg = self._msgobj('msg_01.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 6)
neq(EMPTYSTRING.join(lines), msg.get_payload())
# Now a more complicated multipart
msg = self._msgobj('msg_02.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 43)
fp = openfile('msg_19.txt')
try:
neq(EMPTYSTRING.join(lines), fp.read())
finally:
fp.close()
def test_typed_subpart_iterator(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
it = iterators.typed_subpart_iterator(msg, 'text')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 2)
eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")
def test_typed_subpart_iterator_default_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_03.txt')
it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 1)
eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")
class TestParsers(TestEmailBase):
def test_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
fp = openfile('msg_02.txt')
try:
msg = HeaderParser().parse(fp)
finally:
fp.close()
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertTrue(isinstance(msg.get_payload(), str))
def test_whitespace_continuation(self):
eq = self.assertEqual
# This message contains a line after the Subject: header that has only
# whitespace, but it is not empty!
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_whitespace_continuation_last_header(self):
eq = self.assertEqual
# Like the previous test, but the subject line is the last
# header.
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_crlf_separation(self):
eq = self.assertEqual
fp = openfile('msg_26.txt', mode='rb')
try:
msg = Parser().parse(fp)
finally:
fp.close()
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'application/riscos')
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
# Structure is:
# multipart/digest
# message/rfc822
# text/plain
# message/rfc822
# text/plain
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
# A bug report by Andrew McNamara
lines = ['From: Andrew Person <aperson@dom.ain',
'Subject: Test',
'Date: Tue, 20 Aug 2002 16:43:45 +1000']
msg = email.message_from_string(NL.join(lines))
self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
def test_rfc2822_header_syntax(self):
eq = self.assertEqual
m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 3)
keys = msg.keys()
keys.sort()
eq(keys, ['!"#QUX;~', 'From', '>From'])
eq(msg.get_payload(), 'body')
def test_rfc2822_space_not_allowed_in_header(self):
eq = self.assertEqual
m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 0)
def test_rfc2822_one_character_header(self):
eq = self.assertEqual
m = 'A: first header\nB: second header\nCC: third header\n\nbody'
msg = email.message_from_string(m)
headers = msg.keys()
headers.sort()
eq(headers, ['A', 'B', 'CC'])
eq(msg.get_payload(), 'body')
class TestBase64(unittest.TestCase):
def test_len(self):
eq = self.assertEqual
eq(base64mime.base64_len('hello'),
len(base64mime.encode('hello', eol='')))
for size in range(15):
if size == 0 : bsize = 0
elif size <= 3 : bsize = 4
elif size <= 6 : bsize = 8
elif size <= 9 : bsize = 12
elif size <= 12: bsize = 16
else : bsize = 20
eq(base64mime.base64_len('x'*size), bsize)
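# (Added note) These expected sizes are the standard base64 arithmetic:
# every 3 input bytes expand to 4 output characters, padded up at the end,
# so bsize == ((size + 2) // 3) * 4 for every case in the loop.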
def test_decode(self):
eq = self.assertEqual
eq(base64mime.decode(''), '')
eq(base64mime.decode('aGVsbG8='), 'hello')
eq(base64mime.decode('aGVsbG8=', 'X'), 'hello')
eq(base64mime.decode('aGVsbG8NCndvcmxk\n', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(base64mime.encode(''), '')
eq(base64mime.encode('hello'), 'aGVsbG8=\n')
# Test the binary flag
eq(base64mime.encode('hello\n'), 'aGVsbG8K\n')
eq(base64mime.encode('hello\n', 0), 'aGVsbG8NCg==\n')
# Test the maxlinelen arg
eq(base64mime.encode('xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
# Test the eol argument
eq(base64mime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")
def test_header_encode(self):
eq = self.assertEqual
he = base64mime.header_encode
eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True),
'=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
# Test the maxlinelen argument
eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=
=?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=
=?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=
=?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=
=?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
# Test the eol argument
eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=\r
=?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=\r
=?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=\r
=?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=\r
=?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=\r
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
class TestQuopri(unittest.TestCase):
def setUp(self):
self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \
[chr(x) for x in range(ord('A'), ord('Z')+1)] + \
[chr(x) for x in range(ord('0'), ord('9')+1)] + \
['!', '*', '+', '-', '/', ' ']
self.hnon = [chr(x) for x in range(256) if chr(x) not in self.hlit]
assert len(self.hlit) + len(self.hnon) == 256
self.blit = [chr(x) for x in range(ord(' '), ord('~')+1)] + ['\t']
self.blit.remove('=')
self.bnon = [chr(x) for x in range(256) if chr(x) not in self.blit]
assert len(self.blit) + len(self.bnon) == 256
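# (Added note) hlit/blit enumerate the characters quoprimime may emit
# literally in Q-encoded headers (RFC 2047) and quoted-printable bodies
# (RFC 2045) respectively; everything else must be hex-quoted as =XX,
# which is exactly the invariant the checks below assert.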
def test_header_quopri_check(self):
for c in self.hlit:
self.assertFalse(quoprimime.header_quopri_check(c))
for c in self.hnon:
self.assertTrue(quoprimime.header_quopri_check(c))
def test_body_quopri_check(self):
for c in self.blit:
self.assertFalse(quoprimime.body_quopri_check(c))
for c in self.bnon:
self.assertTrue(quoprimime.body_quopri_check(c))
def test_header_quopri_len(self):
eq = self.assertEqual
hql = quoprimime.header_quopri_len
enc = quoprimime.header_encode
for s in ('hello', 'h@e@l@l@o@'):
# Empty charset and no line-endings. 7 == RFC chrome
eq(hql(s), len(enc(s, charset='', eol=''))-7)
for c in self.hlit:
eq(hql(c), 1)
for c in self.hnon:
eq(hql(c), 3)
def test_body_quopri_len(self):
eq = self.assertEqual
bql = quoprimime.body_quopri_len
for c in self.blit:
eq(bql(c), 1)
for c in self.bnon:
eq(bql(c), 3)
def test_quote_unquote_idempotent(self):
for x in range(256):
c = chr(x)
self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)
def test_header_encode(self):
eq = self.assertEqual
he = quoprimime.header_encode
eq(he('hello'), '=?iso-8859-1?q?hello?=')
eq(he('hello\nworld'), '=?iso-8859-1?q?hello=0D=0Aworld?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?q?hello?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True), '=?iso-8859-1?q?hello=0Aworld?=')
# Test a non-ASCII character
eq(he('hello\xc7there'), '=?iso-8859-1?q?hello=C7there?=')
# Test the maxlinelen argument
eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
=?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=
=?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=
=?iso-8859-1?q?x_xxxx_xxxx_?=""")
# Test the eol argument
eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=\r
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=\r
=?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=\r
=?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=\r
=?iso-8859-1?q?x_xxxx_xxxx_?=""")
def test_decode(self):
eq = self.assertEqual
eq(quoprimime.decode(''), '')
eq(quoprimime.decode('hello'), 'hello')
eq(quoprimime.decode('hello', 'X'), 'hello')
eq(quoprimime.decode('hello\nworld', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(quoprimime.encode(''), '')
eq(quoprimime.encode('hello'), 'hello')
# Test the binary flag
eq(quoprimime.encode('hello\r\nworld'), 'hello\nworld')
eq(quoprimime.encode('hello\r\nworld', 0), 'hello\nworld')
# Test the maxlinelen arg
eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
# Test the eol argument
eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
eq(quoprimime.encode("""\
one line
two line"""), """\
one line
two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):
def tearDown(self):
from email import charset as CharsetModule
try:
del CharsetModule.CHARSETS['fake']
except KeyError:
pass
def test_idempotent(self):
eq = self.assertEqual
# Make sure us-ascii = no Unicode conversion
c = Charset('us-ascii')
s = 'Hello World!'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
# test 8-bit idempotency with us-ascii
s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
def test_body_encode(self):
eq = self.assertEqual
# Try a charset with QP body encoding
c = Charset('iso-8859-1')
eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
# Try a charset with Base64 body encoding
c = Charset('utf-8')
eq('aGVsbG8gd29ybGQ=\n', c.body_encode('hello world'))
# Try a charset with None body encoding
c = Charset('us-ascii')
eq('hello world', c.body_encode('hello world'))
# Try the convert argument, where input codec != output codec
c = Charset('euc-jp')
# With apologies to Tokio Kikuchi ;)
try:
eq('\x1b$B5FCO;~IW\x1b(B',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
except LookupError:
# We probably don't have the Japanese codecs installed
pass
# Testing SF bug #625509, which we have to fake, since there are no
# built-in encodings where the header encoding is QP but the body
# encoding is not.
from email import charset as CharsetModule
CharsetModule.add_charset('fake', CharsetModule.QP, None)
c = Charset('fake')
eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
def test_unicode_charset_name(self):
charset = Charset(u'us-ascii')
self.assertEqual(str(charset), 'us-ascii')
self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
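# Sketch (not in the original file): a Charset's body transfer encoding
# comes from the charset registry, which the tests above poke at directly.
def _demo_body_encodings():
    from email.charset import Charset
    # 'quoted-printable' for latin-1, 'base64' for utf-8, and the
    # encode_7or8bit pass-through callable for us-ascii.
    return [Charset(name).get_body_encoding()
            for name in ('iso-8859-1', 'utf-8', 'us-ascii')]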
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
def test_simple(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append(' Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_simple_surprise(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append('Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_header_needs_no_decoding(self):
h = 'no decoding needed'
self.assertEqual(decode_header(h), [(h, None)])
def test_long(self):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
self.assertTrue(len(l) <= 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g)
h.append(cz_head, cz)
h.append(utf8_head, utf8)
enc = h.encode()
eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_ko?=
=?iso-8859-1?q?mfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wan?=
=?iso-8859-1?q?dgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6?=
=?iso-8859-1?q?rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
=?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
=?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
=?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
=?utf-8?q?_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das_Oder_die_Fl?=
=?utf-8?b?aXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBo+OBpuOBhOOBvuOBmQ==?=
=?utf-8?b?44CC?=""")
eq(decode_header(enc),
[(g_head, "iso-8859-1"), (cz_head, "iso-8859-2"),
(utf8_head, "utf-8")])
ustr = unicode(h)
eq(ustr.encode('utf-8'),
'Die Mieter treten hier ein werden mit einem Foerderband '
'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82')
# Test make_header()
newh = make_header(decode_header(enc))
eq(newh, enc)
def test_header_ctor_default_args(self):
eq = self.ndiffAssertEqual
h = Header()
eq(h, '')
h.append('foo', Charset('iso-8859-1'))
eq(h, '=?iso-8859-1?q?foo?=')
def test_explicit_maxlinelen(self):
eq = self.ndiffAssertEqual
hstr = 'A very long line that must get split to something other than at the 76th character boundary to test the non-default behavior'
h = Header(hstr)
eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
character boundary to test the non-default behavior''')
h = Header(hstr, header_name='Subject')
eq(h.encode(), '''\
A very long line that must get split to something other than at the
76th character boundary to test the non-default behavior''')
h = Header(hstr, maxlinelen=1024, header_name='Subject')
eq(h.encode(), hstr)
def test_us_ascii_header(self):
eq = self.assertEqual
s = 'hello'
x = decode_header(s)
eq(x, [('hello', None)])
h = make_header(x)
eq(s, h.encode())
def test_string_charset(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
eq(h, '=?iso-8859-1?q?hello?=')
## def test_unicode_error(self):
## raises = self.assertRaises
## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
## h = Header()
## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
def test_utf8_shortest(self):
eq = self.assertEqual
h = Header(u'p\xf6stal', 'utf-8')
eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
h = Header(u'\u83ca\u5730\u6642\u592b', 'utf-8')
eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
def test_bad_8bit_header(self):
raises = self.assertRaises
eq = self.assertEqual
x = 'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
raises(UnicodeError, Header, x)
h = Header()
raises(UnicodeError, h.append, x)
eq(str(Header(x, errors='replace')), x)
h.append(x, errors='replace')
eq(str(h), x)
def test_encoded_adjacent_nonencoded(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
h.append('world')
s = h.encode()
eq(s, '=?iso-8859-1?q?hello?= world')
h = make_header(decode_header(s))
eq(h.encode(), s)
def test_whitespace_eater(self):
eq = self.assertEqual
s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
parts = decode_header(s)
eq(parts, [('Subject:', None), ('\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), ('zz.', None)])
hdr = make_header(parts)
eq(hdr.encode(),
'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
def test_broken_base64_header(self):
raises = self.assertRaises
s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
raises(errors.HeaderParseError, decode_header, s)
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
def test_get_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_29.txt')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
eq(msg.get_param('title', unquote=False),
('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii')
eq(msg.get_param('title'),
('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
msg = self._msgobj('msg_01.txt')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
self.ndiffAssertEqual(msg.as_string(), """\
Return-Path:
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
title*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
Hi,
Do you like this message?
-Me
""")
def test_del_param(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_01.txt')
msg.set_param('foo', 'bar', charset='us-ascii', language='en')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
msg.del_param('foo', header='Content-Type')
eq(msg.as_string(), """\
Return-Path:
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
title*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
Hi,
Do you like this message?
-Me
""")
def test_rfc2231_get_content_charset(self):
eq = self.assertEqual
msg = self._msgobj('msg_32.txt')
eq(msg.get_content_charset(), 'us-ascii')
def test_rfc2231_no_language_or_charset(self):
m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
msg = email.message_from_string(m)
param = msg.get_param('NAME')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(
param,
'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
def test_rfc2231_no_language_or_charset_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_partly_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20***fun*** is it not.pdf')
def test_rfc2231_partly_nonencoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')
def test_rfc2231_no_language_or_charset_in_boundary(self):
m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_boundary(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_charset(self):
# This is a nonsensical charset value, but tests the code anyway
m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_content_charset(),
'this is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_charset(self):
m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_charset(self):
m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
u'This is even more ***fun*** is it not.pdf\ufffd')
def test_rfc2231_unknown_encoding(self):
m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(), 'myfile.txt')
def test_rfc2231_single_tick_in_filename_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, None)
eq(language, None)
eq(s, "Frank's Document")
def test_rfc2231_single_tick_in_filename(self):
m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(param, "Frank's Document")
def test_rfc2231_tick_attack_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, "Frank's Document")
def test_rfc2231_tick_attack(self):
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(param, "us-ascii'en-us'Frank's Document")
def test_rfc2231_no_extended_values(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"
"""
msg = email.message_from_string(m)
eq(msg.get_param('name'), "Frank's Document")
def test_rfc2231_encoded_then_unencoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
def test_rfc2231_unencoded_then_encoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
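# A minimal sketch of the decode_header()/make_header() round trip exercised
# by the tests above (e.g. test_us_ascii_header); it relies only on the stdlib
# email package these tests already use. The header value is an illustrative
# example, not one of the test fixtures.
def _demo_header_roundtrip():
    from email.Header import decode_header, make_header
    parts = decode_header('=?iso-8859-1?q?p=F6stal?=')
    # decode_header() yields (decoded_bytes, charset) pairs:
    # [('p\xf6stal', 'iso-8859-1')]
    rebuilt = make_header(parts)
    return rebuilt.encode()  # '=?iso-8859-1?q?p=F6stal?='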
# -*- coding: utf-8 -*-
from micolog_plugin import *
import logging,re
from google.appengine.api import mail
from model import *
from google.appengine.api import users
from base import BaseRequestHandler,urldecode
from google.appengine.ext.webapp import template
SBODY='''New comment on your post "%(title)s"
Author : %(author)s
E-mail : %(email)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
BBODY='''Hi~ New reference to your comment on post "%(title)s"
Author : %(author)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
class NotifyHandler(BaseRequestHandler):
def __init__(self):
BaseRequestHandler.__init__(self)
self.current="config"
self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
def get(self):
self.template_vals.update({'self':self})
content=template.render('plugins/sys_plugin/setup.html',self.template_vals)
self.render2('views/admin/setup_base.html',{'m_id':'sysplugin_notify','content':content})
#Also you can use:
#self.render2('plugins/sys_plugin/setup2.html',{'m_id':'sysplugin_notify','self':self})
def post(self):
self.bbody=self.param('bbody')
self.sbody=self.param('sbody')
self.blog.comment_notify_mail=self.parambool('comment_notify_mail')
self.blog.put()
OptionSet.setValue('sys_plugin_sbody',self.sbody)
OptionSet.setValue('sys_plugin_bbody',self.bbody)
self.get()
class sys_plugin(Plugin):
def __init__(self):
Plugin.__init__(self,__file__)
self.author="xuming"
self.authoruri="http://xuming.net"
self.uri="http://xuming.net"
self.description="System plugin for micolog"
self.name="Sys Plugin"
self.version="0.2"
self.blocklist=OptionSet.getValue("sys_plugin_blocklist",default="")
self.register_filter('head',self.head)
self.register_filter('footer',self.footer)
self.register_urlmap('sys_plugin/setup',self.setup)
self.register_urlhandler('/admin/sys_plugin/notify',NotifyHandler)
self.register_setupmenu('sysplugin_notify',_('Notify'),'/admin/sys_plugin/notify')
self.register_action('pre_comment',self.pre_comment)
self.register_action('save_comment',self.save_comment)
self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
def head(self,content,blog=None,*arg1,**arg2):
content=content+'<meta name="generator" content="Micolog %s" />'%blog.version
return content
def footer(self,content,blog=None,*arg1,**arg2):
return content+'<!--Powered by micolog %s-->'%blog.version
def setup(self,page=None,*arg1,**arg2):
if not page.is_login:
page.redirect(users.create_login_url(page.request.uri))
tempstr='''<form method="post">
blocklist:<br/>
<textarea name="ta_list" rows="12" cols="60">%s</textarea><br/>
<input type="submit" value="Save"/>
</form>'''
if page.request.method=='GET':
page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
else:
self.blocklist=page.param("ta_list")
OptionSet.setValue("sys_plugin_blocklist",self.blocklist)
page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
def get(self,page):
return '''Sys Plugin
This is a system plugin for micolog.
Also a demo for how to write plugin for micolog.
feature
- Add Meta <meta name="generator" content="Micolog x.x" />
- Add footer "<!--Powered by micolog x.x-->"
- Comments Filter with blocklist Setup
- Comment Notify Setup
'''
def pre_comment(self,comment,*arg1,**arg2):
for s in self.blocklist.splitlines():
if s and comment.content.find(s)>-1:
raise Exception('Comment blocked by blocklist entry: '+s)
def save_comment(self,comment,*arg1,**arg2):
if self.blog.comment_notify_mail:
self.notify(comment)
def notify(self,comment):
try:
sbody=self.sbody.decode('utf-8')
except:
sbody=self.sbody
try:
bbody=self.bbody.decode('utf-8')
except:
bbody=self.bbody
if self.blog.comment_notify_mail and self.blog.owner and not users.is_current_user_admin() :
sbody=sbody%{'title':comment.entry.title,
'author':comment.author,
'weburl':comment.weburl,
'email':comment.email,
'content':comment.content,
'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
}
mail.send_mail_to_admins(self.blog.owner.email(),'Comments:'+comment.entry.title, sbody,reply_to=comment.email)
#reply comment mail notify
refers = re.findall(r'#comment-(\d+)', comment.content)
if len(refers)!=0:
replyIDs=[int(a) for a in refers]
commentlist=comment.entry.comments()
emaillist=[c.email for c in commentlist if c.reply_notify_mail and c.key().id() in replyIDs]
emaillist = {}.fromkeys(emaillist).keys()
for refer in emaillist:
if self.blog.owner and mail.is_email_valid(refer):
emailbody = bbody%{'title':comment.entry.title,
'author':comment.author,
'weburl':comment.weburl,
'email':comment.email,
'content':comment.content,
'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
}
message = mail.EmailMessage(sender = self.blog.owner.email(),subject = 'Comments:'+comment.entry.title)
message.to = refer
message.body = emailbody
message.send()
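# A minimal sketch of how notify() above fills the SBODY template; the sample
# values below are invented for illustration only.
def _demo_notify_body():
    sample = {
        'title': 'Hello micolog',
        'author': 'alice',
        'email': 'alice@example.com',
        'weburl': 'http://example.com',
        'content': 'Nice post!',
        'commenturl': 'http://example.com/entry/1#comment-2',
    }
    return SBODY % sample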
'''
Sharevid urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class ShareVidResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "sharevid"
domains = ["sharevid.org"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
html = self.net.http_POST(url, data).content
r = re.search("file\s*:\s*'(.+?)'", html)
if r:
return r.group(1)
else:
raise UrlResolver.ResolverError('could not find video')
def get_url(self, host, media_id):
return 'http://sharevid.org/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/(?:embed-)?([0-9a-zA-Z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?sharevid.org/[0-9A-Za-z]+', url) or re.match('http://(www.)?sharevid.org/embed-[0-9A-Za-z]+[\-]*\d*[x]*\d*.*[html]*', url) or 'sharevid' in host)
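# A quick standalone check (illustration only) of the host/media-id pattern
# used by get_host_and_id() above; the URL is a made-up example.
if __name__ == '__main__':
    match = re.search('//(.+?)/(?:embed-)?([0-9a-zA-Z]+)',
                      'http://sharevid.org/embed-abc123')
    print(match.groups())  # -> ('sharevid.org', 'abc123')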
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all of the data to draw a conclusion.
# For charset detection, a certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
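# A minimal sketch (invented for illustration, not part of chardet) of the
# analyser protocol above: a toy subclass with a fake four-entry frequency
# table. feed() counts characters whose frequency order is below 512, and
# get_confidence() turns the ratio into a score.
class ToyDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = [10, 600, 20, 700]  # toy frequency table
        self._mTableSize = 4
        self._mTypicalDistributionRatio = 0.75
    def get_order(self, aBuf):
        # toy rule: the second byte indexes the table directly
        return wrap_ord(aBuf[1]) % self._mTableSize
# usage sketch:
#   ana = ToyDistributionAnalysis()
#   ana.feed('\xa1\x00', 2)   # order 0 -> frequent (table value 10 < 512)
#   ana.get_confidence()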
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL ().
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
#
##############################################################################
import order
import report_lunch_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# Copyright (C) 2013 Google Inc., authors, and contributors
# Licensed under http://www.apache.org/licenses/LICENSE-2.0
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
from collections import namedtuple
from flask import session
from ggrc.login import get_current_user
from .user_permissions import UserPermissions
Permission = namedtuple('Permission', 'action resource_type context_id')
class DefaultUserPermissionsProvider(object):
def __init__(self, settings):
pass
def permissions_for(self, user):
return DefaultUserPermissions()
class DefaultUserPermissions(UserPermissions):
def _is_allowed(self, permission):
if 'permissions' not in session:
return False
permissions = session['permissions']
if permissions is None:
return True
return permission.context_id in \
permissions\
.get(permission.action, {})\
.get(permission.resource_type, ())
def is_allowed_create(self, resource_type, context_id):
"""Whether or not the user is allowed to create a resource of the specified
type in the context."""
return self._is_allowed(Permission('create', resource_type, context_id))
def is_allowed_read(self, resource_type, context_id):
"""Whether or not the user is allowed to read a resource of the specified
type in the context."""
return self._is_allowed(Permission('read', resource_type, context_id))
def is_allowed_update(self, resource_type, context_id):
"""Whether or not the user is allowed to update a resource of the specified
type in the context."""
return self._is_allowed(Permission('update', resource_type, context_id))
def is_allowed_delete(self, resource_type, context_id):
"""Whether or not the user is allowed to delete a resource of the specified
type in the context."""
return self._is_allowed(Permission('delete', resource_type, context_id))
def _get_contexts_for(self, action, resource_type):
if 'permissions' not in session:
return False
permissions = session['permissions']
if permissions is None:
return None
ret = list(permissions.get(action, {}).get(resource_type, ()))
return ret
def create_contexts_for(self, resource_type):
"""All contexts in which the user has create permission."""
return self._get_contexts_for('create', resource_type)
def read_contexts_for(self, resource_type):
"""All contexts in which the user has read permission."""
return self._get_contexts_for('read', resource_type)
def update_contexts_for(self, resource_type):
"""All contexts in which the user has update permission."""
return self._get_contexts_for('update', resource_type)
def delete_contexts_for(self, resource_type):
"""All contexts in which the user has delete permission."""
return self._get_contexts_for('delete', resource_type)
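# A minimal sketch of the session['permissions'] shape the checks above
# expect (action -> resource type -> iterable of context ids). The values
# here are invented for illustration; a permissions value of None means
# unrestricted access.
def _demo_permissions_lookup():
    permissions = {
        'read': {'Program': [1, 2]},
        'update': {'Program': [1]},
    }
    # mirrors the lookup in _is_allowed() above
    return 2 in permissions.get('read', {}).get('Program', ())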
"""
sentry.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import warnings
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField
class UserManager(BaseManager, UserManager):
pass
class User(BaseModel, AbstractBaseUser):
id = BoundedAutoField(primary_key=True)
username = models.CharField(_('username'), max_length=128, unique=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(
_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
is_superuser = models.BooleanField(
_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
is_managed = models.BooleanField(
_('managed'), default=False,
help_text=_('Designates whether this user should be treated as '
'managed. Select this to disallow the user from '
'modifying their account (username, password, etc).'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager(cache_fields=['pk'])
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
app_label = 'sentry'
db_table = 'auth_user'
verbose_name = _('user')
verbose_name_plural = _('users')
def delete(self):
if self.username == 'sentry':
raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
return super(User, self).delete()
def save(self, *args, **kwargs):
if not self.username:
self.username = self.email
return super(User, self).save(*args, **kwargs)
def has_perm(self, perm_name):
warnings.warn('User.has_perm is deprecated', DeprecationWarning)
return self.is_superuser
def has_module_perms(self, app_label):
# the admin requires this method
return self.is_superuser
def get_full_name(self):
return self.first_name
def get_short_name(self):
return self.username
def merge_to(from_user, to_user):
# TODO: we could discover relations automatically and make this useful
from sentry.models import (
AuditLogEntry, Activity, AuthIdentity, GroupBookmark,
OrganizationMember, UserOption
)
for obj in OrganizationMember.objects.filter(user=from_user):
with transaction.atomic():
try:
obj.update(user=to_user)
except IntegrityError:
pass
for obj in GroupBookmark.objects.filter(user=from_user):
with transaction.atomic():
try:
obj.update(user=to_user)
except IntegrityError:
pass
for obj in UserOption.objects.filter(user=from_user):
with transaction.atomic():
try:
obj.update(user=to_user)
except IntegrityError:
pass
Activity.objects.filter(
user=from_user,
).update(user=to_user)
AuditLogEntry.objects.filter(
actor=from_user,
).update(actor=to_user)
AuditLogEntry.objects.filter(
target_user=from_user,
).update(target_user=to_user)
AuthIdentity.objects.filter(
user=from_user,
).update(user=to_user)
def get_display_name(self):
return self.first_name or self.email or self.username
def is_active_superuser(self):
# TODO(dcramer): add VPN support via INTERNAL_IPS + ipaddr ranges
return self.is_superuser
#!/usr/bin/env python
import os, sys, tempfile
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
# Read parms.
input_name = sys.argv[1]
output_name = sys.argv[2]
attribute_name = sys.argv[3]
# Create temp files.
tmp_name1 = tempfile.NamedTemporaryFile().name
tmp_name2 = tempfile.NamedTemporaryFile().name
# Do conversion.
skipped_lines = 0
first_skipped_line = 0
out = open( tmp_name1, 'w' )
# Write track data to temporary file.
i = 0
for i, line in enumerate( file( input_name ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
try:
elems = line.split( '\t' )
start = str( int( elems[3] ) - 1 ) # GTF coordinates are 1-based, BedGraph are 0-based.
strand = elems[6]
if strand not in ['+', '-']:
strand = '+'
attributes_list = elems[8].split(";")
attributes = {}
for name_value_pair in attributes_list:
pair = name_value_pair.strip().split(" ")
name = pair[0].strip()
if name == '':
continue
# Need to strip double quote from values
value = pair[1].strip(" \"")
attributes[name] = value
value = attributes[ attribute_name ]
# GTF format: chrom, source, name, chromStart, chromEnd, score, strand, frame, attributes.
# BedGraph format: chrom, chromStart, chromEnd, value
out.write( "%s\t%s\t%s\t%s\n" %( elems[0], start, elems[4], value ) )
except:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
else:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
out.close()
# Sort tmp file by chromosome name and chromosome start to create ordered track data.
cmd = "sort -k1,1 -k2,2n < %s > %s" % ( tmp_name1, tmp_name2 )
try:
os.system(cmd)
os.remove(tmp_name1)
except Exception, ex:
sys.stderr.write( "%s\n" % ex )
sys.exit(1)
# Create bedgraph file by combining track definition with ordered track data.
cmd = "echo 'track type=bedGraph' | cat - %s > %s " % ( tmp_name2, output_name )
try:
os.system(cmd)
os.remove(tmp_name2)
except Exception, ex:
sys.stderr.write( "%s\n" % ex )
sys.exit(1)
info_msg = "%i lines converted to BEDGraph. " % ( i + 1 - skipped_lines )
if skipped_lines > 0:
info_msg += "Skipped %d blank/comment/invalid lines starting with line #%d." %( skipped_lines, first_skipped_line )
print info_msg
if __name__ == "__main__": __main__()
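# A minimal sketch of the per-line conversion performed above, on one
# invented GTF record: the 1-based start in column 4 becomes a 0-based
# BedGraph start, and the requested attribute becomes the score column.
def _demo_gtf_to_bedgraph_line():
    line = 'chr1\tsrc\texon\t11\t20\t.\t+\t.\tgene_id "g1"; my_attr "3.5";'
    elems = line.split('\t')
    start = str(int(elems[3]) - 1)  # '10'
    attributes = {}
    for pair in elems[8].split(';'):
        pair = pair.strip().split(' ', 1)
        if len(pair) == 2:
            attributes[pair[0]] = pair[1].strip(' "')
    return '%s\t%s\t%s\t%s' % (elems[0], start, elems[4], attributes['my_attr'])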
"""
Tests for memcache in util app
"""
from django.test import TestCase
from django.core.cache import caches
from util.memcache import safe_key
class MemcacheTest(TestCase):
"""
Test memcache key cleanup
"""
# Test whitespace, control characters, and some non-ASCII UTF-16
UNICODE_CHAR_CODES = (range(30) + [127] +
[129, 500, 2 ** 8 - 1, 2 ** 8 + 1, 2 ** 16 - 1])
def setUp(self):
super(MemcacheTest, self).setUp()
self.cache = caches['default']
def test_safe_key(self):
key = safe_key('test', 'prefix', 'version')
self.assertEqual(key, 'prefix:version:test')
def test_numeric_inputs(self):
# Numeric key
self.assertEqual(safe_key(1, 'prefix', 'version'), 'prefix:version:1')
# Numeric prefix
self.assertEqual(safe_key('test', 5, 'version'), '5:version:test')
# Numeric version
self.assertEqual(safe_key('test', 'prefix', 5), 'prefix:5:test')
def test_safe_key_long(self):
# Choose lengths close to memcached's cutoff (250)
for length in [248, 249, 250, 251, 252]:
# Generate a key of that length
key = 'a' * length
# Make the key safe
key = safe_key(key, '', '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for key length {0}".format(length))
def test_long_key_prefix_version(self):
# Long key
key = safe_key('a' * 300, 'prefix', 'version')
self.assertTrue(self._is_valid_key(key))
# Long prefix
key = safe_key('key', 'a' * 300, 'version')
self.assertTrue(self._is_valid_key(key))
# Long version
key = safe_key('key', 'prefix', 'a' * 300)
self.assertTrue(self._is_valid_key(key))
def test_safe_key_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a key with that character
key = unichr(unicode_char)
# Make the key safe
key = safe_key(key, '', '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def test_safe_key_prefix_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a prefix with that character
prefix = unichr(unicode_char)
# Make the key safe
key = safe_key('test', prefix, '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def test_safe_key_version_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a version with that character
version = unichr(unicode_char)
# Make the key safe
key = safe_key('test', '', version)
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def _is_valid_key(self, key):
"""
Test that a key is memcache-compatible.
Based on Django's validator in core.cache.backends.base
"""
# Check the length
if len(key) > 250:
return False
# Check that there are no spaces or control characters
for char in key:
if ord(char) < 33 or ord(char) == 127:
return False
return True
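# A minimal sketch of one way to satisfy the constraints _is_valid_key()
# checks (at most 250 characters, no spaces or control characters). This is
# not the actual util.memcache.safe_key implementation, only an illustration
# of the hash-unsafe-keys technique; the helper name is invented.
import hashlib
def _demo_sanitize_key(key, prefix, version):
    candidate = u'%s:%s:%s' % (prefix, version, key)
    if len(candidate) > 250 or any(ord(c) < 33 or ord(c) == 127
                                   for c in candidate):
        digest = hashlib.md5(candidate.encode('utf-8')).hexdigest()
        candidate = '%s:%s:%s' % (prefix, version, digest)
    return candidate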
# -*- coding: utf-8 -*-
import logging
import os
import time
from os import listdir
from os.path import join
from threading import Thread, Lock
from select import select
from Queue import Queue, Empty
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
import serial
except ImportError:
_logger.error('OpenERP module hw_scale depends on the pyserial python module')
serial = None
class Scale(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.scalelock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/serial/by-id/'
self.weight = 0
self.weight_info = 'ok'
self.device = None
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message = None):
if status == self.status['status']:
if message != None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Scale Error: '+message)
elif status == 'disconnected' and message:
_logger.warning('Disconnected Scale: '+message)
def get_device(self):
try:
devices = [ device for device in listdir(self.input_dir)]
scales = [ device for device in devices if ('mettler' in device.lower()) or ('toledo' in device.lower()) ]
if len(scales) > 0:
print join(self.input_dir,scales[0])
self.set_status('connected','Connected to '+scales[0])
return serial.Serial(join(self.input_dir,scales[0]),
baudrate = 9600,
bytesize = serial.SEVENBITS,
stopbits = serial.STOPBITS_ONE,
parity = serial.PARITY_EVEN,
#xonxoff = serial.XON,
timeout = 0.01,
writeTimeout= 0.01)
else:
self.set_status('disconnected','Scale Not Found')
return None
except Exception as e:
self.set_status('error',str(e))
return None
def get_weight(self):
self.lockedstart()
return self.weight
def get_weight_info(self):
self.lockedstart()
return self.weight_info
def get_status(self):
self.lockedstart()
return self.status
def read_weight(self):
with self.scalelock:
if self.device:
try:
self.device.write('W')
time.sleep(0.1)
answer = []
while True:
char = self.device.read(1)
if not char:
break
else:
answer.append(char)
if '?' in answer:
stat = ord(answer[answer.index('?')+1])
if stat == 0:
self.weight_info = 'ok'
else:
self.weight_info = []
if stat & 1 :
self.weight_info.append('moving')
if stat & 1 << 1:
self.weight_info.append('over_capacity')
if stat & 1 << 2:
self.weight_info.append('negative')
self.weight = 0.0
if stat & 1 << 3:
self.weight_info.append('outside_zero_capture_range')
if stat & 1 << 4:
self.weight_info.append('center_of_zero')
if stat & 1 << 5:
self.weight_info.append('net_weight')
else:
answer = answer[1:-1]
if 'N' in answer:
answer = answer[0:-1]
try:
self.weight = float(''.join(answer))
except ValueError as v:
self.set_status('error','No data received, please power-cycle the scale')
self.device = None
except Exception as e:
self.set_status('error',str(e))
self.device = None
def set_zero(self):
with self.scalelock:
if self.device:
try:
self.device.write('Z')
except Exception as e:
self.set_status('error',str(e))
self.device = None
def set_tare(self):
with self.scalelock:
if self.device:
try:
self.device.write('T')
except Exception as e:
self.set_status('error',str(e))
self.device = None
def clear_tare(self):
with self.scalelock:
if self.device:
try:
self.device.write('C')
except Exception as e:
self.set_status('error',str(e))
self.device = None
def run(self):
self.device = None
while True:
if self.device:
self.read_weight()
time.sleep(0.05)
else:
with self.scalelock:
self.device = self.get_device()
if not self.device:
time.sleep(5)
s = Scale()
hw_proxy.drivers['scale'] = s
class ScaleDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scale_read/', type='json', auth='none', cors='*')
def scale_read(self):
return {'weight':s.get_weight(), 'unit':'kg', 'info':s.get_weight_info()}
@http.route('/hw_proxy/scale_zero/', type='json', auth='none', cors='*')
def scale_zero(self):
s.set_zero()
return True
@http.route('/hw_proxy/scale_tare/', type='json', auth='none', cors='*')
def scale_tare(self):
s.set_tare()
return True
@http.route('/hw_proxy/scale_clear_tare/', type='json', auth='none', cors='*')
def scale_clear_tare(self):
s.clear_tare()
return True
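# A minimal sketch of the status-bit decoding done in read_weight() above:
# the scale's '?' response carries a status byte whose low six bits flag the
# conditions below. The example value is invented.
def _demo_decode_scale_status(stat):
    names = ('moving', 'over_capacity', 'negative',
             'outside_zero_capture_range', 'center_of_zero', 'net_weight')
    flags = [name for bit, name in enumerate(names) if stat & (1 << bit)]
    return flags or ['ok']
# _demo_decode_scale_status(5) -> ['moving', 'negative']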
# -*- coding: utf-8 -*-
"""
Host language sub-package for the pyRdfa package. It contains variables and possible modules necessary to manage various RDFa
host languages.
This module may have to be modified if a new host language is added to the system. In many cases rdfa_core as a host language is enough, because no special processing is needed. However, some host languages may require an initial context, or their value may control some transformations, in which case additional data have to be added to this module. This module header contains all tables and arrays to be adapted, and the module content may contain specific transformation methods.
@summary: RDFa Host package
@requires: U{RDFLib package}
@organization: U{World Wide Web Consortium}
@author: U{Ivan Herman}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE}
@var content_to_host_language: a dictionary mapping a media type to a host language
@var preferred_suffixes: mapping from preferred suffixes to media types; used if the file is local, ie, there is no HTTP return value for the media type. It corresponds to the preferred suffix in the media type registration
@var initial_contexts: mapping from host languages to list of initial contexts
@var accept_xml_base: list of host languages that accept the xml:base attribute for base setting
@var accept_xml_lang: list of host languages that accept the xml:lang attribute for language setting. Note that XHTML and HTML have some special rules, and those are hard coded...
@var warn_xmlns_usage: list of host languages that should generate a warning for the usage of @xmlns (for RDFa 1.1)
@var accept_embedded_rdf_xml: list of host languages that might also include RDF data using an embedded RDF/XML (e.g., SVG). That RDF data may be merged with the output
@var accept_embedded_turtle: list of host languages that might also include RDF data using a C{script} element. That RDF data may be merged with the output
@var require_embedded_rdf: list of languages that must accept embedded RDF, ie, the corresponding option is irrelevant
@var host_dom_transforms: dictionary mapping a host language to an array of methods that are invoked at the beginning of the parsing process for a specific node. That function can do a last minute change on that DOM node, eg, adding or modifying an attribute. The method's signature is (node, state), where node is the DOM node, and state is the L{Execution context}.
@var predefined_1_0_rel: terms that are hardcoded for HTML+RDF1.0 and replace the initial context for that version
@var beautifying_prefixes: this is really just to make the output more attractive: for each media type a dictionary of prefix-URI pairs that can be used to make the terms look better...
@var default_vocabulary: as its name suggests, default @vocab value for a specific host language
"""
"""
$Id: __init__.py,v 1.21 2013-10-16 11:49:11 ivan Exp $
$Date: 2013-10-16 11:49:11 $
"""
__version__ = "3.0"
from .atom import atom_add_entry_type
from .html5 import html5_extra_attributes, remove_rel
class HostLanguage :
"""An enumeration style class: recognized host language types for this processor of RDFa. Some processing details may depend on these host languages. "rdfa_core" is the default Host Language is nothing else is defined."""
rdfa_core = "RDFa Core"
xhtml = "XHTML+RDFa"
xhtml5 = "XHTML5+RDFa"
html5 = "HTML5+RDFa"
atom = "Atom+RDFa"
svg = "SVG+RDFa"
# initial contexts for host languages
initial_contexts = {
HostLanguage.xhtml : ["http://www.w3.org/2011/rdfa-context/rdfa-1.1",
"http://www.w3.org/2011/rdfa-context/xhtml-rdfa-1.1"],
HostLanguage.xhtml5 : ["http://www.w3.org/2011/rdfa-context/rdfa-1.1"],
HostLanguage.html5 : ["http://www.w3.org/2011/rdfa-context/rdfa-1.1"],
HostLanguage.rdfa_core : ["http://www.w3.org/2011/rdfa-context/rdfa-1.1"],
HostLanguage.atom : ["http://www.w3.org/2011/rdfa-context/rdfa-1.1"],
HostLanguage.svg : ["http://www.w3.org/2011/rdfa-context/rdfa-1.1"],
}
beautifying_prefixes = {
HostLanguage.xhtml : {
"xhv" : "http://www.w3.org/1999/xhtml/vocab#"
},
# HostLanguage.html5 : {
# "xhv" : "http://www.w3.org/1999/xhtml/vocab#"
# },
# HostLanguage.xhtml5 : {
# "xhv" : "http://www.w3.org/1999/xhtml/vocab#"
# },
HostLanguage.atom : {
"atomrel" : "http://www.iana.org/assignments/relation/"
}
}
accept_xml_base = [ HostLanguage.rdfa_core, HostLanguage.atom, HostLanguage.svg, HostLanguage.xhtml5 ]
accept_xml_lang = [ HostLanguage.rdfa_core, HostLanguage.atom, HostLanguage.svg ]
accept_embedded_rdf_xml = [ HostLanguage.svg, HostLanguage.rdfa_core ]
accept_embedded_turtle = [ HostLanguage.svg, HostLanguage.html5, HostLanguage.xhtml5, HostLanguage.xhtml ]
# some languages, eg, SVG, require that embedded content should be combined with the default graph,
# ie, it cannot be turned down by an option
require_embedded_rdf = [ HostLanguage.svg ]
warn_xmlns_usage = [ HostLanguage.html5, HostLanguage.xhtml5, HostLanguage.xhtml ]
host_dom_transforms = {
HostLanguage.atom : [atom_add_entry_type],
HostLanguage.html5 : [html5_extra_attributes, remove_rel],
HostLanguage.xhtml5 : [html5_extra_attributes, remove_rel]
}
default_vocabulary = {
HostLanguage.atom : "http://www.iana.org/assignments/relation/"
}
predefined_1_0_rel = ['alternate', 'appendix', 'cite', 'bookmark', 'chapter', 'contents',
'copyright', 'glossary', 'help', 'icon', 'index', 'meta', 'next', 'p3pv1', 'prev', 'previous',
'role', 'section', 'subsection', 'start', 'license', 'up', 'last', 'stylesheet', 'first', 'top']
# ----------------------------------------------------------------------------------------------------------
class MediaTypes :
"""An enumeration style class: some common media types (better have them at one place to avoid misstyping...)"""
rdfxml = 'application/rdf+xml'
turtle = 'text/turtle'
html = 'text/html'
xhtml = 'application/xhtml+xml'
svg = 'application/svg+xml'
svgi = 'image/svg+xml'
smil = 'application/smil+xml'
atom = 'application/atom+xml'
xml = 'application/xml'
xmlt = 'text/xml'
nt = 'text/plain'
# mapping from (some) content types to RDFa host languages. This may control the exact processing or at least the initial context (see below)...
content_to_host_language = {
MediaTypes.html : HostLanguage.html5,
MediaTypes.xhtml : HostLanguage.xhtml,
MediaTypes.xml : HostLanguage.rdfa_core,
MediaTypes.xmlt : HostLanguage.rdfa_core,
MediaTypes.smil : HostLanguage.rdfa_core,
MediaTypes.svg : HostLanguage.svg,
MediaTypes.svgi : HostLanguage.svg,
MediaTypes.atom : HostLanguage.atom,
}
# mapping preferred suffixes to media types...
preferred_suffixes = {
".rdf" : MediaTypes.rdfxml,
".ttl" : MediaTypes.turtle,
".n3" : MediaTypes.turtle,
".owl" : MediaTypes.rdfxml,
".html" : MediaTypes.html,
".shtml" : MediaTypes.html,
".xhtml" : MediaTypes.xhtml,
".svg" : MediaTypes.svg,
".smil" : MediaTypes.smil,
".xml" : MediaTypes.xml,
".nt" : MediaTypes.nt,
".atom" : MediaTypes.atom
}
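# A minimal sketch (illustration only; the helper name is invented) of how
# the two tables above combine for a local file: the suffix selects a media
# type, the media type selects a host language, and anything unmapped falls
# back to rdfa_core.
def _guess_host_language(fname) :
    for suffix in preferred_suffixes :
        if fname.endswith(suffix) :
            return content_to_host_language.get(preferred_suffixes[suffix],
                                                HostLanguage.rdfa_core)
    return HostLanguage.rdfa_core
# _guess_host_language("figure.svg") yields HostLanguage.svg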
# DTD combinations that may determine the host language and the rdfa version
_XHTML_1_0 = [
("-//W3C//DTD XHTML+RDFa 1.0//EN", "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd")
]
_XHTML_1_1 = [
("-//W3C//DTD XHTML+RDFa 1.1//EN", "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-2.dtd"),
("-//W3C//DTD HTML 4.01+RDFa 1.1//EN", "http://www.w3.org/MarkUp/DTD/html401-rdfa11-1.dtd")
]
_XHTML = [
("-//W3C//DTD XHTML 1.0 Strict//EN", "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"),
("-//W3C//DTD XHTML 1.0 Transitional//EN", "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"),
("-//W3C//DTD XHTML 1.1//EN", "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd")
]
def adjust_html_version(input, rdfa_version) :
"""
Adjust the rdfa_version based on the (possible) DTD
@param input: the input stream that has to be parsed by an xml parser
@param rdfa_version: the current rdfa_version; will be returned if nothing else is found
@return: the rdfa_version, either "1.0" or "1.1", if the DTD says so, otherwise the input rdfa_version value
"""
import xml.dom.minidom
parse = xml.dom.minidom.parse
dom = parse(input)
(hl,version) = adjust_xhtml_and_version(dom, HostLanguage.xhtml, rdfa_version)
return version
def adjust_xhtml_and_version(dom, incoming_language, rdfa_version) :
"""
Check if the xhtml+RDFa content is really XHTML 1.0 or 1.1, or whether it should be considered as XHTML5. This is done
by looking at the DTD. Furthermore, checks whether the system id signals an RDFa 1.0, in which case the
version is also set.
@param dom: top level DOM node
@param incoming_language: host language to be checked; the whole check is relevant for xhtml only.
@param rdfa_version: rdfa_version as known by the caller
@return: a tuple of the possibly modified host language (ie, set to XHTML5) and the possibly modified rdfa version (ie, set to "1.0", "1.1", or the incoming rdfa_version if nothing is found)
"""
if incoming_language == HostLanguage.xhtml :
try :
# There may not be any doctype set in the first place...
publicId = dom.doctype.publicId
systemId = dom.doctype.systemId
if (publicId, systemId) in _XHTML_1_0 :
return (HostLanguage.xhtml,"1.0")
elif (publicId, systemId) in _XHTML_1_1 :
return (HostLanguage.xhtml,"1.1")
elif (publicId, systemId) in _XHTML :
return (HostLanguage.xhtml, rdfa_version)
else :
return (HostLanguage.xhtml5, rdfa_version)
except :
# If any of those are missing, forget it...
return (HostLanguage.xhtml5, rdfa_version)
else :
return (incoming_language, rdfa_version)
#!/usr/bin/env python
"""
Checks that all examples load and synthesize successfully.
"""
import unittest
import glob
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","src","lib"))
import specCompiler
class TestExample(unittest.TestCase):
def __init__(self, spec_filename):
super(TestExample, self).__init__()
self.spec_filename = spec_filename
def runTest(self):
title_str = "#### Testing project '{0}' ####".format(self.spec_filename)
print
if sys.platform not in ['win32', 'cygwin']:
print "\033[41m" # red background color
print "#"*len(title_str)
print title_str
print "#"*len(title_str),
if sys.platform not in ['win32', 'cygwin']:
print "\033[0m" # end coloring
print
c = specCompiler.SpecCompiler(self.spec_filename)
c_out = c.compile()
self.assertIsNotNone(c_out, msg="Compilation failed due to parser error")
realizable, realizableFS, output = c_out
print output
expectedToBeUnrealizable = ("unsynth" in self.spec_filename) or \
("unreal" in self.spec_filename) or \
("unsat" in self.spec_filename)
if expectedToBeUnrealizable:
self.assertFalse(realizable, msg="Specification was realizable but we did not expect this")
else:
self.assertTrue(realizable, msg="Specification was unrealizable")
# TODO: test analysis/cores
# TODO: test config files
#self.assertEqual(function_to_test(self.input), self.output)
def getTester(spec_filename):
class NewTester(TestExample): pass
NewTester.__name__ = "TestExample_" + spec_filename.replace(".","_").replace("\\","_").replace("/","_")
return NewTester(spec_filename)
def suiteAll():
suite = unittest.TestSuite()
for fname in glob.iglob(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","src","examples","*","*.spec")):
# Skip any files untracked by git
if os.system("git ls-files \"{}\" --error-unmatch".format(fname)) != 0:
print ">>> Skipping untracked specification: {}".format(fname)
continue
suite.addTest(getTester(fname))
return suite
def suiteFromList(paths):
suite = unittest.TestSuite()
for fname in paths:
suite.addTest(getTester(fname))
return suite
if __name__ == '__main__':
# If we are called with arguments, test only those projects.
# Otherwise, test every tracked project in the examples/ directory
if len(sys.argv) >= 2:
unittest.TextTestRunner().run(suiteFromList(sys.argv[1:]))
else:
unittest.TextTestRunner().run(suiteAll())
#!/usr/bin/env python
'''
A boottool clone, but written in python and relying mostly on grubby[1].
[1] - http://git.fedorahosted.org/git/?p=grubby.git
'''
import os
import re
import sys
import optparse
import logging
import subprocess
import urllib
import tarfile
import tempfile
import shutil
import struct
#
# Get rid of DeprecationWarning messages on newer Python version while still
# making it run properly on Python 2.4
#
try:
import hashlib as md5
except ImportError:
import md5
__all__ = ['Grubby', 'OptionParser', 'EfiVar', 'EfiToolSys',
'EliloConf', 'find_executable', 'parse_entry']
#
# Information on default requirements and installation for grubby
#
GRUBBY_REQ_VERSION = (8, 15)
GRUBBY_TARBALL_URI = ('http://pkgs.fedoraproject.org/repo/pkgs/grubby/'
'grubby-8.15.tar.bz2/c53d3f4cb5d22b25d27e3ee4c7ed5b80/'
'grubby-8.15.tar.bz2')
GRUBBY_TARBALL_MD5 = 'c53d3f4cb5d22b25d27e3ee4c7ed5b80'
GRUBBY_DEFAULT_SYSTEM_PATH = '/sbin/grubby'
GRUBBY_DEFAULT_USER_PATH = '/tmp/grubby'
#
# All options that are first class actions
# One of them should be given on the command line
#
ACTIONS = ['bootloader-probe',
'arch-probe',
'add-kernel',
'boot-once',
'install',
'remove-kernel',
'info',
'set-default',
'default',
'update-kernel',
# Commands not available in the old boottool
'grubby-version',
'grubby-version-check',
'grubby-install']
#
# When the command line is parsed, 'opts' gets attributes that are named
# after the command line options, but with slight changes
#
ACTIONS_OPT_METHOD_NAME = [act.replace('-', '_') for act in ACTIONS]
#
# Actions (as a opt/method name) that require a --title parameter
#
ACTIONS_REQUIRE_TITLE = ['boot_once', ]
#
# Include the logger (logging channel) name on the default logging config
#
LOGGING_FORMAT = "%(levelname)s: %(name)s: %(message)s"
#
# Default log object
#
log = logging.getLogger('boottool')
def find_header(hdr):
"""
Find a given header in the system.
"""
for dir in ['/usr/include', '/usr/local/include']:
file = os.path.join(dir, hdr)
if os.path.exists(file):
return file
raise ValueError('Missing header: %s' % hdr)
class EfiVar(object):
'''
Helper class to manipulate EFI firmware variables
This class has no notion of the EFI firmware variables interface, that is,
where it should read from or write to in order to create or delete EFI
variables.
On systems with kernel >= 2.6, that interface is a directory structure
under /sys/firmware/efi/vars.
On systems with kernel <= 2.4, that interface is going to be a directory
structure under /proc/efi/vars. But be advised: this has not been tested
yet on kernels <= 2.4.
'''
GUID_FMT = '16B'
GUID_CONTENT = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ATTR_NON_VOLATILE = 0x0000000000000001
ATTR_BOOTSERVICE_ACCESS = 0x0000000000000002
ATTR_RUNTIME_ACCESS = 0x0000000000000004
DEFAULT_ATTRIBUTES = (ATTR_NON_VOLATILE |
ATTR_BOOTSERVICE_ACCESS |
ATTR_RUNTIME_ACCESS)
FMT = ('512H' +
GUID_FMT +
'1L' +
'512H' +
'1L' +
'1I')
def __init__(self, name, data, guid=None, attributes=None):
'''
Instantiates a new EfiVar
:type name: string
:param name: the name of the variable that will be created
:type data: string
:param data: user data that will populate the variable
:type guid: tuple
:param guid: content for the guid value that composes the full variable
name
:type attributes: integer
:param attributes: bitwise OR of the EFI attributes this variable will
have set
'''
self.data = data
self.name = name
if guid is None:
guid = self.GUID_CONTENT
self.guid = guid
if attributes is None:
attributes = self.DEFAULT_ATTRIBUTES
self.attributes = attributes
def get_name(self):
'''
Returns the variable name in a list ready for struct.pack()
'''
l = [0] * 512
for i in range(len(self.name)):
l[i] = ord(self.name[i])
return l
def get_data(self):
'''
Returns the variable data in a list ready for struct.pack()
'''
l = [0] * 512
for i in range(len(self.data)):
l[i] = ord(self.data[i])
return l
def get_packed(self):
'''
Returns the EFI variable raw data packed by struct.pack()
This data should be written to the appropriate interface to create
an EFI variable
'''
params = self.get_name()
params += self.guid
params.append((len(self.data) * 2) + 2)
params += self.get_data()
params.append(0)
params.append(self.attributes)
return struct.pack(self.FMT, *params)
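#
# Hedged sketch (helper name is illustrative, not part of the original
# tool): the blob produced by EfiVar.get_packed() always has the size
# given by struct.calcsize(EfiVar.FMT), which is the fixed-size record
# the kernel's EFI vars interface expects.
#
def _example_efivar_packed_size():
    var = EfiVar('EliloAlt', 'vmlinuz root=/dev/sda1')
    packed = var.get_packed()
    # struct.pack() output length always equals calcsize() of the format
    return len(packed) == struct.calcsize(EfiVar.FMT)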
class EfiToolSys(object):
'''
Interfaces with /sys/firmware/efi/vars provided by the kernel
This interface is present on kernels >= 2.6 with CONFIG_EFI and
CONFIG_EFI_VARS options set.
'''
BASE_PATH = '/sys/firmware/efi/vars'
NEW_VAR = os.path.join(BASE_PATH, 'new_var')
DEL_VAR = os.path.join(BASE_PATH, 'del_var')
def __init__(self):
if not os.path.exists(self.BASE_PATH):
sys.exit(-1)
self.log = logging.getLogger(self.__class__.__name__)
def create_variable(self, name, data, guid=None, attributes=None):
'''
Creates a new EFI variable
:type name: string
:param name: the name of the variable that will be created
:type data: string
:param data: user data that will populate the variable
:type guid: tuple
:param guid: content for the guid value that composes the full variable
name
:type attributes: integer
:param attributes: bitwise OR of the EFI attributes this variable will
have set
'''
if not self.check_basic_structure():
return False
var = EfiVar(name, data, guid, attributes)
f = open(self.NEW_VAR, 'w')
f.write(var.get_packed())
f.close()
return True
def delete_variable(self, name, data, guid=None, attributes=None):
'''
Deletes an existing EFI variable
:type name: string
:param name: the name of the variable that will be deleted
:type data: string
:param data: user data that will populate the variable
:type guid: tuple
:param guid: content for the guid value that composes the full variable
name
:type attributes: integer
:param attributes: bitwise OR of the EFI attributes this variable will
have set
'''
if not self.check_basic_structure():
return False
var = EfiVar(name, data, guid, attributes)
f = open(self.DEL_VAR, 'w')
f.write(var.get_packed())
f.close()
return True
def check_basic_structure(self):
'''
Checks the basic directory structure for the /sys/.../vars interface
'''
status = True
if not os.path.isdir(self.BASE_PATH):
self.log.error('Could not find the base directory interface for '
'EFI variables: "%s"', self.BASE_PATH)
status = False
if not os.path.exists(self.NEW_VAR):
self.log.error('Could not find the file interface for creating new'
' EFI variables: "%s"', self.NEW_VAR)
status = False
if not os.path.exists(self.DEL_VAR):
self.log.error('Could not find the file interface for deleting '
'EFI variables: "%s"', self.DEL_VAR)
status = False
return status
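#
# Hedged usage sketch for EfiToolSys (illustrative only; it assumes an
# EFI system with CONFIG_EFI_VARS, since __init__ exits otherwise, and
# the variable name/data shown are hypothetical):
#
def _example_create_boot_once_variable():
    efi = EfiToolSys()
    # packs an EfiVar and writes it to /sys/firmware/efi/vars/new_var
    return efi.create_variable('EliloAlt', 'vmlinuz root=/dev/sda1')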
class EliloConf(object):
'''
A simple parser for the elilo configuration file
Has simple features to add and remove global options only, as this is all
we need. grubby takes care of manipulating the boot entries themselves.
'''
def __init__(self, path='/etc/elilo.conf'):
'''
Instantiates a new EliloConf
:type path: string
:param path: path to elilo.conf
'''
self.path = path
self.global_options_to_add = {}
self.global_options_to_remove = {}
self._follow_symlink()
def _follow_symlink(self):
'''
Dereference the path if it's a symlink and make it absolute
elilo.conf usually is a symlink to the EFI boot partition, so we
better follow it to the proper location.
'''
if os.path.islink(self.path):
self.path_link = self.path
self.path = os.path.realpath(self.path_link)
self.path = os.path.abspath(self.path)
def add_global_option(self, key, val=None):
'''
Adds a global option to the updated elilo configuration file
:type key: string
:param key: option name
:type val: string or None
:param val: option value or None for options with no values
:return: None
'''
self.global_options_to_add[key] = val
def remove_global_option(self, key, val=None):
'''
Removes a global option from the updated elilo configuration file
:type key: string
:param key: option name
:type val: string or None
:param val: option value or None for options with no values
:return: None
'''
self.global_options_to_remove[key] = val
def line_to_keyval(self, line):
'''
Transforms a text line from the configuration file into a tuple
:type line: string
:param line: line of text from the configuration file
:return: a tuple with key and value
'''
parts = line.split('=', 1)
key = parts[0].rstrip()
if len(parts) == 1:
val = None
elif len(parts) == 2:
val = parts[1].strip()
return (key, val)
def keyval_to_line(self, keyval):
'''
Transforms a tuple into a text line suitable for the config file
:type keyval: tuple
:param keyval: a tuple containing key and value
:return: a text line suitable for the config file
'''
key, val = keyval
if val is None:
return '%s\n' % key
else:
return '%s=%s\n' % (key, val)
def matches_global_option_to_remove(self, line):
'''
Utility method to check if option is to be removed
:type line: string
:param line: line of text from the configuration file
:return: True or False
'''
key, val = self.line_to_keyval(line)
if key in self.global_options_to_remove:
return True
else:
return False
def matches_global_option_to_add(self, line):
'''
Utility method to check if option is to be added
:type line: string
:param line: line of text from the configuration file
:return: True or False
'''
key, val = self.line_to_keyval(line)
if key in self.global_options_to_add:
return True
else:
return False
def get_updated_content(self):
'''
Returns the config file content with options to add and remove applied
'''
output = ''
for key, val in self.global_options_to_add.items():
output += self.keyval_to_line((key, val))
eliloconf = open(self.path, 'r')
for line in eliloconf.readlines():
if self.matches_global_option_to_remove(line):
continue
if self.matches_global_option_to_add(line):
continue
else:
output += line
eliloconf.close()
return output
def update(self):
'''
Writes the updated content to the configuration file
'''
content = self.get_updated_content()
eliloconf_write = open(self.path, 'w')
eliloconf_write.write(content)
eliloconf_write.close()
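#
# Hedged round-trip sketch for EliloConf, using a temporary file instead
# of the real /etc/elilo.conf (helper name and sample content are
# illustrative, not from the original tool):
#
def _example_eliloconf_roundtrip():
    fd, path = tempfile.mkstemp()
    os.write(fd, 'prompt\ntimeout=20\n')
    os.close(fd)
    conf = EliloConf(path)
    conf.add_global_option('checkalt')
    conf.remove_global_option('prompt')
    conf.update()
    # the rewritten file now starts with 'checkalt', keeps 'timeout=20',
    # and no longer contains the 'prompt' line
    return open(path).read()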
def find_executable(executable, favorite_path=None):
'''
Returns the path of a given executable, or None if it cannot be found
:type executable: string
:param executable: the name of a file that can be read and executed
:type favorite_path: string
:param favorite_path: directory to be searched before the $PATH entries
'''
if os.path.isabs(executable):
paths = [os.path.dirname(executable)]
executable = os.path.basename(executable)
else:
paths = os.environ['PATH'].split(':')
if favorite_path is not None and favorite_path not in paths:
paths.insert(0, favorite_path)
for d in paths:
f = os.path.join(d, executable)
if os.path.exists(f) and os.access(f, os.R_OK | os.X_OK):
return f
return None
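#
# Hedged usage sketch for find_executable() (illustrative helper, not
# part of the original tool): an absolute path restricts the search to
# its own directory, while a bare name walks $PATH, optionally trying
# favorite_path first.
#
def _example_locate_grubby():
    return (find_executable(GRUBBY_DEFAULT_SYSTEM_PATH) or
            find_executable('grubby', favorite_path='/tmp'))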
def parse_entry(entry_str, separator='='):
"""
Parse entry as returned by boottool.
:param entry_str: one entry information as returned by boottool
:param separator: key/value separator character (defaults to '=')
:return: dictionary of key -> value where key is the string before
the first separator in an entry line and value is the string
after it
"""
entry = {}
for line in entry_str.splitlines():
if len(line) == 0:
continue
try:
name, value = line.split(separator, 1)
except ValueError:
continue
name = name.strip()
value = value.strip()
if name == 'index':
# index values are integers
value = int(value)
entry[name] = value
return entry
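#
# Hedged sketch for parse_entry(); the sample text is hypothetical, not
# captured grubby output:
#
def _example_parse_entry():
    sample = 'index=0\nkernel=/boot/vmlinuz\ntitle=linux'
    entry = parse_entry(sample)
    # 'index' is coerced to int, everything else stays a string
    return entry == {'index': 0, 'kernel': '/boot/vmlinuz', 'title': 'linux'}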
def detect_distro_type():
'''
Simple distro detection based on release/version files
'''
if os.path.exists('/etc/redhat-release'):
return 'redhat'
elif os.path.exists('/etc/debian_version'):
return 'debian'
elif os.path.exists('/etc/issue'):
if re.match(r'.*SUSE.*', open('/etc/issue').read()):
return 'suse'
else:
return None
class DebianBuildDeps(object):
'''
Checks and installs grubby build dependencies on Debian (like) systems
Tested on:
* Debian Squeeze (6.0)
* Ubuntu 12.04 LTS
'''
PKGS = ['gcc', 'make', 'libpopt-dev', 'libblkid-dev']
def check(self):
'''
Checks if necessary packages are already installed
'''
result = True
for p in self.PKGS:
args = ['dpkg-query', '--show', '--showformat=${Status}', p]
output = subprocess.Popen(args, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True).stdout.read()
if output != 'install ok installed':
result = False
return result
def install(self):
'''
Attempt to install the build dependencies via a package manager
'''
if self.check():
return True
else:
try:
args = ['apt-get', 'update', '-qq']
subprocess.call(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
args = ['apt-get', 'install', '-qq'] + self.PKGS
subprocess.call(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
return self.check()
class RPMBuildDeps(object):
'''
Base class for RPM based systems
'''
def check(self):
'''
Checks if necessary packages are already installed
'''
result = True
for p in self.PKGS:
args = ['rpm', '-q', '--qf=%{NAME}', p]
output = subprocess.Popen(args, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True).stdout.read()
if not output.startswith(p):
result = False
return result
class SuseBuildDeps(RPMBuildDeps):
'''
Checks and installs grubby build dependencies on SuSE (like) systems
Tested on:
* OpenSuSE 12.2
'''
PKGS = ['gcc', 'make', 'popt-devel', 'libblkid-devel']
def install(self):
'''
Attempt to install the build dependencies via a package manager
'''
if self.check():
return True
else:
try:
args = ['zypper', '-n', '--no-cd', 'install'] + self.PKGS
result = subprocess.call(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
return self.check()
class RedHatBuildDeps(RPMBuildDeps):
'''
Checks and installs grubby build dependencies on RedHat (like) systems
Tested on:
* Fedora 17
* RHEL 5
* RHEL 6
'''
PKGS = ['gcc', 'make']
REDHAT_RELEASE_RE = re.compile(r'.*\srelease\s(\d)\.(\d)\s.*')
def __init__(self):
'''
Initializes a new dep installer, taking into account RHEL version
'''
match = self.REDHAT_RELEASE_RE.match(
open('/etc/redhat-release').read())
if match:
major, minor = match.groups()
if int(major) <= 5:
self.PKGS += ['popt', 'e2fsprogs-devel']
else:
self.PKGS += ['popt-devel', 'libblkid-devel']
def install(self):
'''
Attempt to install the build dependencies via a package manager
'''
if self.check():
return True
else:
try:
args = ['yum', 'install', '-q', '-y'] + self.PKGS
# This is an extra safety step, to install the needed header
# in case the blkid headers package could not be detected
args += ['/usr/include/popt.h',
'/usr/include/blkid/blkid.h']
result = subprocess.call(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
return self.check()
DISTRO_DEPS_MAPPING = {
'debian': DebianBuildDeps,
'redhat': RedHatBuildDeps,
'suse': SuseBuildDeps
}
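#
# Hedged sketch (illustrative helper) of how detect_distro_type() and
# DISTRO_DEPS_MAPPING combine to pick and run a dependency installer:
#
def _example_install_build_deps():
    klass = DISTRO_DEPS_MAPPING.get(detect_distro_type())
    if klass is None:
        # unknown distro: no installer class is registered
        return False
    return klass().install()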
def install_grubby_if_necessary(path=None):
'''
Installs grubby if it's missing from this system, or if the installed
version is not recent enough for the needs of boottool
'''
installed_grubby = False
if path is None:
if find_executable(GRUBBY_DEFAULT_USER_PATH):
executable = GRUBBY_DEFAULT_USER_PATH
else:
executable = find_executable(GRUBBY_DEFAULT_SYSTEM_PATH)
else:
executable = find_executable(path)
if executable is None:
log.info('Installing grubby because it was not found on this system')
grubby = Grubby()
path = grubby.grubby_install()
installed_grubby = True
else:
grubby = Grubby(executable)
current_version = grubby.get_grubby_version()
if current_version is None:
log.error('Could not find version for grubby executable "%s"',
executable)
path = grubby.grubby_install()
installed_grubby = True
elif current_version < GRUBBY_REQ_VERSION:
log.info('Installing grubby because currently installed '
'version (%s.%s) is not recent enough',
current_version[0], current_version[1])
path = grubby.grubby_install()
installed_grubby = True
if installed_grubby:
grubby = Grubby(path)
installed_version = grubby.get_grubby_version_raw()
log.debug('Installed: %s', installed_version)
class GrubbyInstallException(Exception):
'''
Exception that signals failure when doing grubby installation
'''
pass
class Grubby(object):
'''
Grubby wrapper
This class calls the grubby binary for most commands, but also
adds some functionality that is not really suited to be included
in it, such as boot-once.
'''
SUPPORTED_BOOTLOADERS = ('lilo', 'grub2', 'grub', 'extlinux', 'yaboot',
'elilo')
def __init__(self, path=None, opts=None):
self._set_path(path)
self.bootloader = None
self.opts = opts
self.log = logging.getLogger(self.__class__.__name__)
if 'BOOTTOOL_DEBUG_RUN' in os.environ:
self.debug_run = True
else:
self.debug_run = False
self._check_grubby_version()
self._set_bootloader()
def _set_path(self, path=None):
"""
Set grubby path.
If path is not provided, check first if there's a built grubby,
then look for the system grubby.
:param path: Alternate grubby path.
"""
if path is None:
if os.path.exists(GRUBBY_DEFAULT_USER_PATH):
self.path = GRUBBY_DEFAULT_USER_PATH
else:
self.path = GRUBBY_DEFAULT_SYSTEM_PATH
else:
self.path = path
#
# The following block contain utility functions that are used to build
# most of these class methods, such as methods for running commands
# and preparing grubby command line switches.
#
def _check_grubby_version(self):
'''
Checks the version of grubby in use and warns if it's not good enough
'''
current_version = self.get_grubby_version()
if current_version is None:
self.log.warn('Could not detect current grubby version. It may '
'be that you are running an unsupported version '
'of grubby')
elif current_version < GRUBBY_REQ_VERSION:
self.log.warn('version %s.%s being used is not guaranteed to '
'work properly. Minimum required version is %s.%s.',
current_version[0], current_version[1],
GRUBBY_REQ_VERSION[0], GRUBBY_REQ_VERSION[1])
def _run_get_output(self, arguments):
'''
Utility function that runs a command and returns command output
'''
if self.debug_run:
self.log.debug('running: "%s"', ' '.join(arguments))
result = None
try:
result = subprocess.Popen(arguments, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True).stdout.read()
except Exception:
pass
if result is not None:
result = result.strip()
if self.debug_run:
self.log.debug('previous command output: "%s"', result)
else:
self.log.error('_run_get_output error while running: "%s"',
' '.join(arguments))
return result
def _run_get_output_err(self, arguments):
'''
Utility function that runs a command and returns its output, while
also capturing stderr so it does not reach the terminal
'''
if self.debug_run:
self.log.debug('running: "%s"', ' '.join(arguments))
result = None
try:
result = subprocess.Popen(arguments, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True).stdout.read()
except Exception:
pass
if result is not None:
result = result.strip()
if self.debug_run:
self.log.debug('previous command output/error: "%s"', result)
else:
self.log.error('_run_get_output_err error while running: "%s"',
' '.join(arguments))
return result
def _run_get_return(self, arguments):
'''
Utility function that runs a command and returns status code
'''
if self.debug_run:
self.log.debug('running: "%s"', ' '.join(arguments))
result = None
try:
result = subprocess.call(arguments)
if self.debug_run:
self.log.debug('previous command result: %s', result)
except OSError:
result = -1
self.log.error('caught OSError, returning %s', result)
return result
def _set_bootloader(self, bootloader=None):
'''
Attempts to detect what bootloader is installed on the system
The result of this method is used in all other calls to grubby,
so that it acts accordingly to the bootloader detected.
'''
if bootloader is None:
result = self.get_bootloader()
if result is not None:
self.bootloader = result
else:
if bootloader in self.SUPPORTED_BOOTLOADERS:
self.bootloader = bootloader
else:
raise ValueError('Bootloader "%s" is not supported' %
bootloader)
def _run_grubby_prepare_args(self, arguments, include_bootloader=True):
'''
Prepares the argument list when running a grubby command
'''
args = []
if self.path is None:
self._set_path()
args.append(self.path)
if self.path is not None and not os.path.exists(self.path):
self.log.error('grubby executable does not exist: "%s"', self.path)
if not os.access(self.path, os.R_OK | os.X_OK):
self.log.error('insufficient permissions (read and execute) '
'for grubby executable: "%s"', self.path)
# If a bootloader has been detected, that is, a mode has been set,
# it's passed as the first command line argument to grubby
if include_bootloader and self.bootloader is not None:
args.append('--%s' % self.bootloader)
# Override configuration file
if self.opts is not None and self.opts.config_file:
args.append('--config-file=%s' % self.opts.config_file)
args += arguments
return args
def _run_grubby_get_output(self, arguments, include_bootloader=True):
'''
Utility function that runs grubby with arguments and returns output
'''
args = self._run_grubby_prepare_args(arguments, include_bootloader)
return self._run_get_output(args)
def _run_grubby_get_return(self, arguments, include_bootloader=True):
'''
Utility function that runs grubby with arguments and returns the status code
'''
args = self._run_grubby_prepare_args(arguments, include_bootloader)
return self._run_get_return(args)
def _extract_tarball(self, tarball, directory):
'''
Extract a tarball into a directory
This code assumes the first (or only) entry is the main directory
:type tarball: string
:param tarball: tarball file path
:type directory: string
:param directory: directory path
:return: path of toplevel directory as extracted from tarball
'''
f = tarfile.open(tarball)
members = f.getmembers()
topdir = members[0]
assert topdir.isdir()
# we can not use extractall() because it is not available on python 2.4
for m in members:
f.extract(m, directory)
return os.path.join(directory, topdir.name)
def _get_entry_indexes(self, info):
'''
Returns the indexes found in a get_info() output
:type info: list of lines
:param info: result of utility method get_info() (currently unused)
:return: list of index numbers
'''
indexes = []
for line in self.get_info_lines():
try:
key, value = line.split("=")
if key == 'index':
indexes.append(int(value))
except ValueError:
pass
return indexes
def _index_for_title(self, title):
'''
Returns the index of an entry based on the title of the entry
:type title: string
:param title: the title of the entry
:return: the index of the given entry or None
'''
if self._is_number(title):
return title
info = self.get_info_lines()
for i in self._get_entry_indexes(info):
info = self.get_info(i)
if info is None:
continue
lines = info.splitlines()
looking_for = ('title=%s' % title,
'label=%s' % title)
for line in lines:
if line in looking_for:
return i
return None
def _info_filter(self, info, key, value=None):
'''
Filters info, looking for keys, optionally set with a given value
:type info: list of lines
:param info: result of utility method get_info()
:type key: string
:param key: filter based on this key
:type value: string
:param value: filter based on this value
:return: value or None
'''
for line in info:
if value is not None:
looking_for = '%s=%s' % (key, value)
if line == looking_for:
return line.split("=")[1]
else:
if line.startswith("%s=" % key):
return line.split("=")[1]
return None
def _kernel_for_title(self, title):
'''
Returns the kernel path for an entry based on its title
:type title: string
:param title: the title of the entry
:return: the kernel path or None
'''
index = self._index_for_title(title)
if index is not None:
info = self.get_info_lines(index)
kernel = self._info_filter(info, 'kernel')
return kernel
else:
return None
def _is_number(self, data):
'''
Returns true if supplied data is an int or string with digits
'''
if isinstance(data, int):
return True
elif isinstance(data, str) and data.isdigit():
return True
return False
def _get_entry_selection(self, data):
'''
Returns a valid grubby parameter for commands such as --update-kernel
'''
if self._is_number(data):
return data
elif isinstance(data, str) and data.startswith('/'):
# assume it's the kernel filename
return data
elif isinstance(data, str):
return self._kernel_for_title(data)
else:
raise ValueError("Bad value for 'kernel' parameter. Expecting "
"either and int (index) or string (kernel or "
"title)")
def _remove_duplicate_cmdline_args(self, cmdline):
"""
Remove the duplicate entries in cmdline making sure that the first
duplicate occurrences are the ones removed and the last one remains
(this is in order to not change the semantics of the "console"
parameter where the last occurrence has special meaning)
:param cmdline: a space separated list of kernel boot parameters
(ex. 'console=ttyS0,57600n8 nmi_watchdog=1')
:return: a space separated list of kernel boot parameters without
duplicates
"""
copied = set()
new_args = []
for arg in reversed(cmdline.split()):
if arg not in copied:
new_args.insert(0, arg)
copied.add(arg)
return ' '.join(new_args)
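# Hedged illustration of the rule above: for the hypothetical cmdline
# 'quiet nmi_watchdog=1 quiet', the first 'quiet' is dropped and the
# last one kept, yielding 'nmi_watchdog=1 quiet'.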
#
# The following methods implement a form of "API" that action methods
# can rely on. Another goal is to maintain compatibility with the current
# client side API in autotest (client/shared/boottool.py)
#
def get_bootloader(self):
'''
Get the bootloader name that is detected on this machine
This method performs the same action as the client side boottool.py
get_type() method, but with a better name IMHO.
:return: name of detected bootloader
'''
args = [self.path, '--bootloader-probe']
output = self._run_get_output_err(args)
if output is None:
return None
if output.startswith('grubby: bad argument'):
return None
elif output not in self.SUPPORTED_BOOTLOADERS:
return None
return output
# Alias for client side boottool.py API
get_type = get_bootloader
# Alias for boottool app
bootloader_probe = get_bootloader
def get_architecture(self):
'''
Get the system architecture
This is a much simpler version than the original boottool one: it
does not attempt to filter the result of the command / system call
that returns the architecture.
:return: string with system architecture, such as x86_64, ppc64, etc
'''
return os.uname()[4]
# Alias for boottool app
arch_probe = get_architecture
def get_titles(self):
'''
Get the title of all boot entries.
:return: list with titles of boot entries
'''
titles = []
for line in self.get_info_lines():
try:
key, value = line.split("=")
if key in ['title', 'label']:
titles.append(value)
except ValueError:
pass
return titles
def get_default_index(self):
'''
Get the default entry index.
This method performs the same action as the client side boottool.py
get_default() method, but with a better name IMHO.
:return: an integer index of the default entry.
'''
default_index = self._run_grubby_get_output(['--default-index'])
if default_index:
default_index = int(default_index)
return default_index
# Alias for client side boottool.py API
get_default = get_default_index
# Alias for boottool app
default = get_default_index
def set_default_by_index(self, index):
"""
Sets the given entry number to be the default on every next boot
To set a default only for the next boot, use boot_once() instead.
This method performs the same action as the client side boottool.py
set_default() method, but with a better name IMHO.
Note: both --set-default=<kernel> and --set-default-index=<index>
on grubby return no error when grubby doesn't find the kernel or
index. So this method will, until grubby gets fixed, always return
success.
:param index: entry index number to set as the default.
"""
return self._run_grubby_get_return(['--set-default-index=%s' % index])
# Alias for client side boottool.py API
set_default = set_default_by_index
def get_default_title(self):
'''
Get the default entry title.
Conforms to the client side boottool.py API, but relies directly on
grubby functionality.
:return: a string of the default entry title.
'''
return self._run_grubby_get_output(['--default-title'])
def get_entry(self, search_info):
"""
Get a single bootloader entry information.
NOTE: if entry is "fallback" and bootloader is grub
use index instead of kernel title ("fallback") as fallback is
a special option in grub
:param search_info: can be 'default', position number or title
:return: a dictionary of key->value where key is the type of entry
information (ex. 'title', 'args', 'kernel', etc) and value
is the value for that piece of information.
"""
info = self.get_info(search_info)
return parse_entry(info)
def get_entries(self):
"""
Get all entries information.
:return: a dictionary of index -> entry where entry is a dictionary
of entry information as described for get_entry().
"""
raw = self.get_info()
entries = {}
for entry_str in re.split("index", raw):
if len(entry_str.strip()) == 0:
continue
if entry_str.startswith('boot='):
continue
if 'non linux entry' in entry_str:
continue
entry = parse_entry("index" + entry_str)
try:
entries[entry["index"]] = entry
except KeyError:
continue
return entries
def get_info(self, entry='ALL'):
'''
Returns information on a given entry, or all of them if not specified
The information is returned as a set of lines that match the output
of 'grubby --info=<entry>'
:type entry: string
:param entry: entry description, usually an index starting from 0
:return: set of lines
'''
command = '--info=%s' % entry
info = self._run_grubby_get_output([command])
if info:
return info
def get_title_for_kernel(self, path):
"""
Returns a title for a particular kernel.
:param path: path of the kernel image configured in the boot config
:return: if the given kernel path is found it will return a string
with the title for the found entry, otherwise returns None
"""
entries = self.get_entries()
for entry in entries.itervalues():
if entry.get('kernel') == path:
return entry['title']
return None
def add_args(self, kernel, args):
"""
Add cmdline arguments for the specified kernel.
:param kernel: can be a position number (index) or title
:param args: argument to be added to the current list of args
"""
entry_selection = self._get_entry_selection(kernel)
command_arguments = ['--update-kernel=%s' % entry_selection,
'--args=%s' % args]
self._run_grubby_get_return(command_arguments)
def remove_args(self, kernel, args):
"""
Removes specified cmdline arguments.
:param kernel: can be a position number (index) or title
:param args: argument to be removed of the current list of args
"""
entry_selection = self._get_entry_selection(kernel)
command_arguments = ['--update-kernel=%s' % entry_selection,
'--remove-args=%s' % args]
self._run_grubby_get_return(command_arguments)
def add_kernel(self, path, title='autoserv', root=None, args=None,
initrd=None, default=False, position='end'):
"""
Add a kernel entry to the bootloader (or replace if one exists
already with the same title).
:param path: string path to the kernel image file
:param title: title of this entry in the bootloader config
:param root: string of the root device
:param args: string with cmdline args
:param initrd: string path to the initrd file
:param default: set to True to make this entry the default one
(default False)
:param position: where to insert the new entry in the bootloader
config file (default 'end', other valid input 'start', or
# of the title)
:param xen_hypervisor: xen hypervisor image file (valid only when
xen mode is enabled; not currently part of this method's signature)
"""
if title in self.get_titles():
self.remove_kernel(title)
parameters = ['--add-kernel=%s' % path, '--title=%s' % title]
# FIXME: grubby takes no --root parameter
# if root:
# parameters.append('--root=%s' % root)
if args:
parameters.append('--args=%s' %
self._remove_duplicate_cmdline_args(args))
if initrd:
parameters.append('--initrd=%s' % initrd)
if default:
parameters.append('--make-default')
# There's currently an issue with grubby '--add-to-bottom' feature.
# Because it uses the tail instead of the head of the list to add
# a new entry, when copying a default entry as a template
# (--copy-default), it usually copies the "recover" entries that
# usually go along with a regular boot entry, especially on grub2.
#
# So, for now, until I fix grubby, we'll *not* respect the position
# (--position=end) command line option.
#
# if opts.position == 'end':
# parameters.append('--add-to-bottom')
parameters.append("--copy-default")
return self._run_grubby_get_return(parameters)
def remove_kernel(self, kernel):
"""
Removes a specific entry from the bootloader configuration.
:param kernel: entry position or entry title.
FIXME: param kernel should also take 'start' or 'end'.
"""
entry_selection = self._get_entry_selection(kernel)
if entry_selection is None:
self.log.debug('remove_kernel for title "%s" did not find an '
'entry. This is most probably NOT an error', kernel)
return 0
command_arguments = ['--remove-kernel=%s' % entry_selection]
return self._run_grubby_get_return(command_arguments)
#
# The following methods are not present in the original client side
# boottool.py
#
def get_info_lines(self, entry='ALL'):
'''
Returns information on a given entry, or all of them if not specified
The information is returned as a set of lines that match the output
of 'grubby --info=<entry>'
:type entry: string
:param entry: entry description, usually an index starting from 0
:return: set of lines
'''
info = self.get_info(entry)
if info:
return info.splitlines()
def get_grubby_version_raw(self):
'''
Get the version of grubby that is installed on this machine as is
:return: string with raw output from grubby --version
'''
return self._run_grubby_get_output(['--version'], False)
def get_grubby_version(self):
'''
Get the version of grubby that is installed on this machine
:return: tuple with (major, minor) grubby version
'''
output = self.get_grubby_version_raw()
if output is None:
self.log.warn('Could not run grubby to fetch its version')
return None
match = re.match(r'(grubby version)?(\s)?(\d+)\.(\d+)(.*)', output)
if match:
groups = match.groups()
return (int(groups[2]), int(groups[3]))
else:
return None
def grubby_install_patch_makefile(self):
'''
Patch the Makefile, making CFLAGS more forgiving of older toolchains
'''
cflags_line = 'CFLAGS += $(RPM_OPT_FLAGS) -std=gnu99 -ggdb\n'
libs_line = 'grubby_LIBS = -lblkid -lpopt -luuid\n'
shutil.move('Makefile', 'Makefile.boottool.bak')
o = open('Makefile', 'w')
for l in open('Makefile.boottool.bak').readlines():
if l.startswith('CFLAGS += '):
o.write(cflags_line)
elif l.startswith('grubby_LIBS = -lblkid -lpopt'):
o.write(libs_line)
else:
o.write(l)
o.close()
def grubby_install_backup(self, path):
'''
Backs up the current grubby binary to make room for the one we'll build
:type path: string
:param path: path to the binary that should be backed up
'''
backup_path = '%s.boottool.bkp' % path
if (os.path.exists(path) and
not os.path.exists(backup_path)):
try:
shutil.move(path, backup_path)
except Exception:
self.log.warn('Failed to backup the current grubby binary')
def grubby_install_fetch_tarball(self, topdir):
'''
Fetches and verifies the grubby source tarball
'''
tarball_name = os.path.basename(GRUBBY_TARBALL_URI)
# first look in the current directory
try:
tarball = tarball_name
f = open(tarball)
except Exception:
try:
# then the autotest source directory
from autotest.client.shared.settings import settings
top_path = settings.get_value('COMMON', 'autotest_top_path')
tarball = os.path.join(top_path, tarball_name)
f = open(tarball)
except Exception:
# then try to download it from GRUBBY_TARBALL_URI
try:
tarball = os.path.join(topdir, tarball_name)
urllib.urlretrieve(GRUBBY_TARBALL_URI, tarball)
f = open(tarball)
except Exception:
return None
tarball_md5 = md5.md5(f.read()).hexdigest()
if tarball_md5 != GRUBBY_TARBALL_MD5:
return None
return tarball
def grubby_build(self, topdir, tarball):
'''
Attempts to build grubby from the source tarball
'''
def log_lines(lines):
for line in lines:
self.log.debug(line.strip())
try:
find_header('popt.h')
except ValueError:
self.log.debug('No popt.h header present, skipping build')
return False
tarball_name = os.path.basename(tarball)
srcdir = os.path.join(topdir, 'src')
srcdir = self._extract_tarball(tarball, srcdir)
os.chdir(srcdir)
self.grubby_install_patch_makefile()
result = subprocess.Popen(['make'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if result.wait() != 0:
self.log.debug('Failed to build grubby during "make" step')
log_lines(result.stderr.read().splitlines())
return False
install_root = os.path.join(topdir, 'install_root')
os.environ['DESTDIR'] = install_root
result = subprocess.Popen(['make', 'install'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if result.wait() != 0:
self.log.debug('Failed to build grubby during "make install" step')
log_lines(result.stderr.read().splitlines())
return False
return True
def grubby_install(self, path=None):
'''
Attempts to install a recent enough version of grubby
So far tested on:
* Fedora 16 x86_64
* Debian 6 x86_64
* SuSE 12.1 x86_64
* RHEL 4 on ia64 (with updated python 2.4)
* RHEL 5 on ia64
* RHEL 6 on ppc64
'''
if path is None:
if os.geteuid() == 0:
path = GRUBBY_DEFAULT_SYSTEM_PATH
else:
path = GRUBBY_DEFAULT_USER_PATH
topdir = tempfile.mkdtemp()
deps_klass = DISTRO_DEPS_MAPPING.get(detect_distro_type(), None)
if deps_klass is not None:
deps = deps_klass()
if not deps.check():
self.log.warn('Installing distro build deps for grubby. This '
'may take a while, depending on bandwidth and '
'actual number of packages to install')
if not deps.install():
self.log.error('Failed to install distro build deps for '
'grubby')
tarball = self.grubby_install_fetch_tarball(topdir)
if tarball is None:
raise GrubbyInstallException('Failed to fetch grubby tarball')
srcdir = os.path.join(topdir, 'src')
install_root = os.path.join(topdir, 'install_root')
os.mkdir(install_root)
if not self.grubby_build(topdir, tarball):
raise GrubbyInstallException('Failed to build grubby')
self.grubby_install_backup(path)
grubby_bin = os.path.join(install_root, 'sbin', 'grubby')
inst_dir = os.path.dirname(path)
if not os.access(inst_dir, os.W_OK):
raise GrubbyInstallException('No permission to copy grubby '
'binary to directory "%s"' % inst_dir)
try:
shutil.copy(grubby_bin, path)
except Exception:
raise GrubbyInstallException('Failed to copy grubby binary to '
'directory "%s"' % inst_dir)
return path
def boot_once(self, title=None):
'''
Configures the bootloader to boot an entry only once
This is not implemented by grubby, but directly implemented here, via
the 'boot_once_<bootloader>' methods.
'''
self.log.debug('Title chosen to boot once: %s', title)
available_titles = self.get_titles()
if title not in available_titles:
self.log.error('Entry with title "%s" was not found', title)
return -1
default_title = self.get_default_title()
self.log.debug('Title actually set as default: %s', default_title)
if default_title == title:
self.log.info('Doing nothing: entry to boot once is the same as '
'default entry')
return
else:
self.log.debug('Setting boot once for entry: %s', title)
bootloader = self.get_bootloader()
if bootloader in ('grub', 'grub2', 'elilo'):
entry_index = self._index_for_title(title)
if entry_index is None:
self.log.error('Could not find index for entry with title '
'"%s"', title)
return -1
if bootloader == 'grub':
return self.boot_once_grub(entry_index)
elif bootloader == 'grub2':
return self.boot_once_grub2(entry_index)
elif bootloader == 'yaboot':
return self.boot_once_yaboot(title)
elif bootloader == 'elilo':
return self.boot_once_elilo(entry_index)
else:
self.log.error("Detected bootloader does not implement boot once")
return -1
def boot_once_grub(self, entry_index):
'''
Implements the boot once feature for the grub bootloader
'''
# grubonce is a hack present in distros like OpenSUSE
grubonce_cmd = find_executable('grubonce')
if grubonce_cmd is None:
# XXX: check the type of default set (numeric or "saved")
grub_instructions = ['savedefault --default=%s --once' %
entry_index, 'quit']
grub_instructions_text = '\n'.join(grub_instructions)
grub_binary = find_executable('grub')
if grub_binary is None:
self.log.error("Could not find the 'grub' binary, aborting")
return -1
p = subprocess.Popen([grub_binary, '--batch'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(grub_instructions_text)
complete_out = ''
if out is not None:
complete_out = out
if err is not None:
complete_out += "\n%s" % err
grub_batch_err = []
if complete_out:
for l in complete_out.splitlines():
if re.search('error', l, re.IGNORECASE):
grub_batch_err.append(l)
if grub_batch_err:
self.log.error("Error while running grub to set boot "
"once: %s", "\n".join(grub_batch_err))
return -1
self.log.debug('No error detected while running grub to set boot '
'once')
return 0
else:
rc = self._run_get_return([grubonce_cmd, str(entry_index)])
if rc:
self.log.error('Error running %s', grubonce_cmd)
else:
self.log.debug('No error detected while running %s',
grubonce_cmd)
return rc
def boot_once_grub2(self, entry_index):
'''
Implements the boot once feature for the grub2 bootloader
Caveat: this assumes the default set is of type "saved", and not a
numeric value.
'''
default_index_re = re.compile(r'\s*set\s+default\s*=\s*"+(\d+)"+')
grub_reboot_names = ['grub-reboot', 'grub2-reboot']
grub_reboot_exec = None
for grub_reboot in grub_reboot_names:
grub_reboot_exec = find_executable(grub_reboot)
if grub_reboot_exec is not None:
break
if grub_reboot_exec is None:
self.log.error('Could not find executable among searched names: '
'%s', ', '.join(grub_reboot_names))
return -1
grub_set_default_names = ['grub-set-default', 'grub2-set-default']
grub_set_default_exec = None
for grub_set_default in grub_set_default_names:
grub_set_default_exec = find_executable(grub_set_default)
if grub_set_default_exec is not None:
break
if grub_set_default_exec is None:
self.log.error('Could not find executable among searched names: '
'%s', ', '.join(grub_set_default_names))
return -1
# Make sure the "set default" entry in the configuration file is set
# to "${saved_entry}. Assuming the config file is at
# /boot/grub/grub.cfg
deb_grub_cfg_path = '/boot/grub/grub.cfg'
deb_grub_cfg_bkp_path = '%s.boottool.bak' % deb_grub_cfg_path
default_index = None
if os.path.exists(deb_grub_cfg_path):
shutil.move(deb_grub_cfg_path, deb_grub_cfg_bkp_path)
o = open(deb_grub_cfg_path, 'w')
for l in open(deb_grub_cfg_bkp_path).readlines():
m = default_index_re.match(l)
if m is not None:
default_index = int(m.groups()[0])
o.write('set default="${saved_entry}"\n')
else:
o.write(l)
o.close()
# Make the current default entry the "previous saved entry"
if default_index is None:
default_index = self.get_default_index()
else:
# grubby adds entries to top. this assumes a new entry to boot once
# has already been added to the top, so fallback to the second
# entry (index 1) if the boot once entry fails to boot
if entry_index == 0:
default_index = 1
else:
default_index = 0
# A negative index is never acceptable
if default_index >= 0:
prev_saved_return = self._run_get_return([grub_set_default_exec,
'%s' % default_index])
if prev_saved_return != 0:
self.log.error(
'Could not make entry %s the previous saved entry',
default_index)
return prev_saved_return
# Finally set the boot once entry
return self._run_get_return([grub_reboot_exec,
'%s' % entry_index])
def boot_once_yaboot(self, entry_title):
'''
Implements the boot once feature for the yaboot bootloader
'''
nvsetenv_cmd = find_executable('nvsetenv')
if nvsetenv_cmd is None:
self.log.error("Could not find nvsetenv in PATH")
return -1
return self._run_get_return([nvsetenv_cmd,
'boot-once',
entry_title])
def boot_once_elilo(self, entry_index):
'''
Implements the boot once feature for elilo on machines with kernel >= 2.6
This manipulates EFI variables via the interface available at
/sys/firmware/efi/vars
'''
info = self.get_entry(entry_index)
kernel = os.path.basename(info['kernel'])
# remove quotes
args = info['args']
if args[0] == '"':
args = args[1:]
if args[-1] == '"':
args = args[:-1]
params = "root=%s %s" % (info['root'], args)
data = "%s %s" % (kernel, params)
efi = EfiToolSys()
if not (efi.create_variable('EliloAlt', data)):
return -1
eliloconf = EliloConf()
eliloconf.add_global_option('checkalt')
eliloconf.add_global_option('initrd', os.path.basename(info['initrd']))
eliloconf.remove_global_option('prompt')
eliloconf.update()
return 0
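#
# Hedged usage sketch for the Grubby wrapper (illustrative only; it runs
# the real grubby binary, so results depend on the host system):
#
def _example_grubby_queries():
    grubby = Grubby()
    # each call shells out to grubby and parses its output
    print grubby.get_bootloader()
    print grubby.get_titles()
    print grubby.get_default_index()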
class OptionParser(optparse.OptionParser):
'''
Command line option parser
Aims to maintain compatibility at the command line level with boottool
'''
option_parser_usage = '''%prog [options]'''
def __init__(self, **kwargs):
optparse.OptionParser.__init__(self,
usage=self.option_parser_usage,
**kwargs)
misc = self.add_option_group('MISCELLANEOUS OPTIONS')
misc.add_option('--config-file',
help='Specifies the path and name of the bootloader '
'config file, overriding autodetection of this file')
misc.add_option('--force', action='store_true',
help='If specified, any conflicting kernels will be '
'removed')
misc.add_option('--bootloader',
help='Manually specify the bootloader to use. By '
'default, boottool will automatically try to detect '
'the bootloader being used')
misc.add_option('--root',
help='The device where the root partition is located')
misc.add_option('--debug', default=0,
help='Prints debug messages. This expects a numerical '
'argument corresponding to the debug message '
'verbosity')
probe = self.add_option_group('SYSTEM PROBING')
probe.add_option('--bootloader-probe', action='store_true',
help='Prints the bootloader in use on the system '
'and exits')
probe.add_option('--arch-probe', action='store_true',
help='Prints the arch of the system and exits')
actions = self.add_option_group('ACTIONS ON BOOT ENTRIES')
actions.add_option('--add-kernel',
help='Adds a new kernel with the given path')
actions.add_option('--remove-kernel',
help='Removes the bootloader entry with the given '
'position or title. Also accepts \'start\' or '
'\'end\'')
actions.add_option('--update-kernel',
help='Updates an existing kernel with the given '
'position number or title. Useful options when '
'modifying a kernel include --args and '
'--remove-args')
actions.add_option('--info',
help='Display information about the bootloader entry '
'at the given position number. Also accepts \'all\' '
'or \'default\'')
actions.add_option('--default', action='store_true',
help='Prints the current default kernel for the '
'bootloader')
actions.add_option('--set-default',
help='Updates the bootloader to set the default '
'boot entry to the given position or title')
actions.add_option('--install', action='store_true',
help='Causes bootloader to update and re-install '
'the bootloader file')
actions.add_option('--boot-once', action='store_true',
help='Causes the bootloader to boot the kernel '
'specified by --title just one time, then fall back'
' to the default entry. This option does not work '
'identically on all architectures')
act_args = self.add_option_group('ACTION PARAMETERS')
act_args.add_option('--title',
help='The title or label to use for the '
'bootloader entry. Required when adding a new '
'entry.')
act_args.add_option('--position',
help='Insert bootloader entry at the given '
'position number, counting from 0. Also accepts '
'\'start\' or \'end\'. Optional when adding a new '
'entry.')
act_args.add_option('--make-default', action='store_true',
help='Specifies that the bootloader entry being '
'added should be the new default')
kernel = self.add_option_group('LINUX KERNEL PARAMETERS',
'Options specific to manage boot '
'entries with Linux')
kernel.add_option('--args',
help='Add arguments to be passed to the kernel at '
'boot. Use when adding a new entry or when '
'modifying an existing entry.')
kernel.add_option('--remove-args',
help='Arguments to be removed from an existing entry'
'. Use when modifying an existing entry with '
'--update-kernel action.')
kernel.add_option('--initrd',
help='The initrd image path to use in the bootloader '
'entry')
kernel.add_option('--module',
help='This option adds modules to the new kernel. It'
' only works with Grub Bootloader. For more module '
'options just add another --module parameter')
grubby = self.add_option_group('GRUBBY',
'Manage grubby, the tool that drives '
'most of boottool functionality')
grubby.add_option('--grubby-version', action='store_true',
help='Prints the version of grubby installed on '
'this machine')
grubby.add_option('--grubby-version-check',
help='Checks if the installed version of grubby is '
'recent enough')
grubby.add_option('--grubby-install', action='store_true',
help='Attempts to install a recent enough version '
'of grubby')
grubby.add_option('--grubby-path',
help='Use a different grubby binary, located at the '
'given path')
def opts_has_action(self, opts):
'''
Checks if (parsed) opts has a first class action
'''
has_action = False
for action in ACTIONS_OPT_METHOD_NAME:
value = getattr(opts, action)
if value is not None:
has_action = True
return has_action
def opts_get_action(self, opts):
'''
Gets the selected action from the parsed opts
'''
for action in ACTIONS_OPT_METHOD_NAME:
value = getattr(opts, action)
if value is not None:
return action
return None
def check_values(self, opts, args):
'''
Validate the options the user has supplied
'''
# check if an action has been selected
if not self.opts_has_action(opts):
self.print_help()
raise SystemExit
# check if action needs a --title option
action = self.opts_get_action(opts)
if action in ACTIONS_REQUIRE_TITLE:
if opts.title is None:
print 'Action %s requires a --title parameter' % action
raise SystemExit
return (opts, args)
class BoottoolApp(object):
'''
The boottool application itself
'''
def __init__(self):
self.opts = None
self.args = None
self.option_parser = OptionParser()
self.grubby = None
self.log = logging.getLogger(self.__class__.__name__)
def _parse_command_line(self):
'''
Parses the command line arguments
'''
(self.opts,
self.args) = self.option_parser.parse_args()
def _configure_logging(self):
'''
Configures logging based on --debug= command line switch
We do not have as many levels as the original boottool(.pl) had, but
we accept the same range of parameters and adjust it to our levels.
'''
log_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
try:
level = int(self.opts.debug)
except ValueError:
level = 0
max_level = max(log_map.keys())
if level > max_level:
level = max_level
if 'BOOTTOOL_DEBUG_RUN' in os.environ:
logging_level = logging.DEBUG
else:
logging_level = log_map.get(level)
logging.basicConfig(level=logging_level,
format=LOGGING_FORMAT)
def run(self):
self._parse_command_line()
self._configure_logging()
# if we made it this far, the command line checking succeeded
if self.opts.grubby_path:
self.grubby = Grubby(self.opts.grubby_path, self.opts)
else:
install_grubby_if_necessary()
self.grubby = Grubby(opts=self.opts)
if self.opts.bootloader:
self.log.debug('Forcing bootloader "%s"', self.opts.bootloader)
try:
self.grubby._set_bootloader(self.opts.bootloader)
except ValueError, msg:
self.log.error(msg)
sys.exit(-1)
#
# The following implements a simple action -> method dispatcher
# First, we look for a method named action_ + action_name on the
# app instance itself. If not found, we try to find a method with
# the same name as the action in the grubby instance.
#
action_name = self.option_parser.opts_get_action(self.opts)
try:
action_method = getattr(self, "action_%s" % action_name)
except AttributeError:
action_method = getattr(self.grubby, action_name)
if action_method:
result = action_method()
if result is None:
result = 0
elif isinstance(result, str):
print result
result = 0
sys.exit(result)
#
# The following block implements actions. Actions are methods that will be
# called because of user supplied parameters on the command line. Most
# actions, such as the ones that query information, are built around the
# "API" methods defined in the previous block
#
def action_grubby_version(self):
'''
Prints the version of grubby that is installed on this machine
'''
version = self.grubby.get_grubby_version()
if version is not None:
print "%s.%s" % version
return
version = self.grubby.get_grubby_version_raw()
if version is not None:
print version
def action_grubby_version_check(self):
'''
Checks whether the version of grubby installed on this machine is
recent enough
'''
current_version = self.grubby.get_grubby_version()
if current_version is None:
self.log.warn('Could not get version numbers from grubby')
return -1
required_version = self.opts.grubby_version_check.split('.', 1)
required_version_major = required_version[0]
if len(required_version) == 1:
req_version = (int(required_version_major), 0)
else:
req_version = (int(required_version_major),
int(required_version[1]))
if current_version >= req_version:
return 0
else:
return -1
def action_grubby_install(self):
'''
Attempts to install a recent enough version of grubby
'''
return self.grubby.grubby_install()
def action_info(self):
'''
Prints boot entry information
boottool is frequently called with 'all' lowercase, but
grubby expects it to be uppercase
'''
if not self.opts.info:
self.log.error('Parameter to info is required')
return -1
info_index = self.opts.info
if not ((info_index.lower() == 'all') or
(self.opts.info.isdigit())):
self.log.error('Parameter to info should be either "all", "ALL" '
'or an integer index')
return -1
if info_index == 'all':
info_index = 'ALL'
if info_index == 'ALL':
entries = self.grubby.get_entries()
else:
entries = {info_index: self.grubby.get_entry(info_index)}
for index, entry in entries.items():
print
for key, val in entry.items():
# remove quotes
if isinstance(val, str):
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
print '%-8s: %s' % (key, val)
def action_add_kernel(self):
'''
Adds a new boot entry based on the values of other command line options
:return: the return code of the grubby add-kernel command
'''
if not self.opts.add_kernel:
self.log.error("Kernel to add is required")
return -1
if not self.opts.title:
self.log.error("Kernel title is required")
return -1
if not self.opts.initrd:
self.log.error("initrd is required")
return -1
return self.grubby.add_kernel(self.opts.add_kernel,
self.opts.title,
args=self.opts.args,
initrd=self.opts.initrd)
def action_update_kernel(self):
'''
Updates a kernel entry
'''
if not self.opts.update_kernel:
self.log.error("Kernel title to update is required")
return -1
args = []
kernel = self.grubby._get_entry_selection(self.opts.update_kernel)
if kernel is not None:
args.append("--update-kernel=%s" % kernel)
if self.opts.args:
args.append("--args=%s" % self.opts.args)
if self.opts.remove_args:
args.append("--remove-args=%s" % self.opts.remove_args)
return self.grubby._run_grubby_get_return(args)
def action_remove_kernel(self):
'''
Removes a boot entry by the specified title
boottool expects: title
grubby expects: kernel path or special syntax (e.g., TITLE=<title>)
'''
if not self.opts.remove_kernel:
self.log.error("Kernel title to remove is required")
return -1
return self.grubby.remove_kernel(self.opts.remove_kernel)
def action_boot_once(self):
"""
Sets a specific entry for the next boot only
The subsequent boots will use the default kernel
"""
if not self.opts.boot_once:
self.log.error("Kernel to boot once is required")
return -1
return self.grubby.boot_once(self.opts.title)
def action_default(self):
"""
Get the default entry index
"""
print self.grubby.get_default_index()
def action_set_default(self):
"""
Sets the given entry number to be the default on every next boot
"""
if not self.opts.set_default:
self.log.error("Entry index is required")
return -1
return self.grubby.set_default_by_index(self.opts.set_default)
if __name__ == '__main__':
app = BoottoolApp()
app.run()
else:
logging.basicConfig(level=logging.INFO,
format=LOGGING_FORMAT)
callback_classes = [
['void', 'ns3::Mac48Address', 'ns3::Mac48Address', 'unsigned int', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned int', 'ns3::Mac48Address', 'ns3::Mac48Address', 'ns3::dot11s::PeerLink::PeerState', 'ns3::dot11s::PeerLink::PeerState', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr', 'ns3::Ptr', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['std::vector >', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'bool', 'ns3::Ptr', 'ns3::Mac48Address', 'ns3::Mac48Address', 'unsigned short', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned int', 'ns3::Mac48Address', 'ns3::Ptr', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::WifiMacHeader const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr', 'ns3::Mac48Address', 'ns3::Mac48Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Mac48Address', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Mac48Address', 'unsigned char', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr', 'ns3::Ptr', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr', 'ns3::Ptr', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from syntaq import __author__ as syntaq_author
from syntaq import __license__ as syntaq_license
from syntaq import __package__ as syntaq_package
from syntaq import __version__ as syntaq_version
from distutils.core import setup
setup(
name=syntaq_package,
version=syntaq_version,
description="Lightweight markup language parser/compiler based on Creole",
long_description="Syntaq is a lightweight markup language based on (and " \
"backward compatible with) Creole.",
author=syntaq_author,
author_email="nigel@nigelsmall.name",
url="http://nigelsmall.com/syntaq",
scripts=[],
packages=["syntaq"],
license=syntaq_license,
classifiers=[]
)
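# Hedged usage note: with this distutils setup() a source distribution
# would typically be produced with "python setup.py sdist" and installed
# with "python setup.py install" (standard distutils commands).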
"""
Tests for the CORS CSRF middleware
"""
from mock import patch, Mock
import ddt
from django.test import TestCase
from django.test.utils import override_settings
from django.core.exceptions import MiddlewareNotUsed, ImproperlyConfigured
from django.http import HttpResponse
from django.middleware.csrf import CsrfViewMiddleware
from cors_csrf.middleware import CorsCSRFMiddleware, CsrfCrossDomainCookieMiddleware
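# Unique marker object: tests patch CsrfViewMiddleware.process_view to
# return it, then assert the wrapper middleware passes it through intact.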
SENTINEL = object()
class TestCorsMiddlewareProcessRequest(TestCase):
"""
Test processing a request through the middleware
"""
def get_request(self, is_secure, http_referer):
"""
Build a test request
"""
request = Mock()
request.META = {'HTTP_REFERER': http_referer}
request.is_secure = lambda: is_secure
return request
@override_settings(FEATURES={'ENABLE_CORS_HEADERS': True})
def setUp(self):
super(TestCorsMiddlewareProcessRequest, self).setUp()
self.middleware = CorsCSRFMiddleware()
def check_not_enabled(self, request):
"""
Check that the middleware does NOT process the provided request
"""
with patch.object(CsrfViewMiddleware, 'process_view') as mock_method:
res = self.middleware.process_view(request, None, None, None)
self.assertIsNone(res)
self.assertFalse(mock_method.called)
def check_enabled(self, request):
"""
Check that the middleware does process the provided request
"""
def cb_check_req_is_secure_false(request, callback, args, kwargs):
"""
Check that the request doesn't pass (yet) the `is_secure()` test
"""
self.assertFalse(request.is_secure())
return SENTINEL
with patch.object(CsrfViewMiddleware, 'process_view') as mock_method:
mock_method.side_effect = cb_check_req_is_secure_false
res = self.middleware.process_view(request, None, None, None)
self.assertIs(res, SENTINEL)
self.assertTrue(request.is_secure())
@override_settings(CORS_ORIGIN_WHITELIST=['foo.com'])
def test_enabled(self):
request = self.get_request(is_secure=True, http_referer='https://foo.com/bar')
self.check_enabled(request)
@override_settings(
FEATURES={'ENABLE_CORS_HEADERS': False},
CORS_ORIGIN_WHITELIST=['foo.com']
)
def test_disabled_no_cors_headers(self):
with self.assertRaises(MiddlewareNotUsed):
CorsCSRFMiddleware()
@override_settings(CORS_ORIGIN_WHITELIST=['bar.com'])
def test_disabled_wrong_cors_domain(self):
request = self.get_request(is_secure=True, http_referer='https://foo.com/bar')
self.check_not_enabled(request)
@override_settings(CORS_ORIGIN_WHITELIST=['foo.com'])
def test_disabled_wrong_cors_domain_reversed(self):
request = self.get_request(is_secure=True, http_referer='https://bar.com/bar')
self.check_not_enabled(request)
@override_settings(CORS_ORIGIN_WHITELIST=['foo.com'])
def test_disabled_http_request(self):
request = self.get_request(is_secure=False, http_referer='https://foo.com/bar')
self.check_not_enabled(request)
@override_settings(CORS_ORIGIN_WHITELIST=['foo.com'])
def test_disabled_http_referer(self):
request = self.get_request(is_secure=True, http_referer='http://foo.com/bar')
self.check_not_enabled(request)
@ddt.ddt
class TestCsrfCrossDomainCookieMiddleware(TestCase):
"""Tests for `CsrfCrossDomainCookieMiddleware`. """
REFERER = 'https://www.example.com'
COOKIE_NAME = 'shared-csrftoken'
COOKIE_VALUE = 'abcd123'
COOKIE_DOMAIN = '.edx.org'
@override_settings(
FEATURES={'ENABLE_CROSS_DOMAIN_CSRF_COOKIE': True},
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN
)
def setUp(self):
super(TestCsrfCrossDomainCookieMiddleware, self).setUp()
self.middleware = CsrfCrossDomainCookieMiddleware()
@override_settings(FEATURES={'ENABLE_CROSS_DOMAIN_CSRF_COOKIE': False})
def test_disabled_by_feature_flag(self):
with self.assertRaises(MiddlewareNotUsed):
CsrfCrossDomainCookieMiddleware()
@ddt.data('CROSS_DOMAIN_CSRF_COOKIE_NAME', 'CROSS_DOMAIN_CSRF_COOKIE_DOMAIN')
def test_improperly_configured(self, missing_setting):
settings = {
'FEATURES': {'ENABLE_CROSS_DOMAIN_CSRF_COOKIE': True},
'CROSS_DOMAIN_CSRF_COOKIE_NAME': self.COOKIE_NAME,
'CROSS_DOMAIN_CSRF_COOKIE_DOMAIN': self.COOKIE_DOMAIN
}
del settings[missing_setting]
with override_settings(**settings):
with self.assertRaises(ImproperlyConfigured):
CsrfCrossDomainCookieMiddleware()
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_ALLOW_ALL=True
)
def test_skip_if_not_secure(self):
response = self._get_response(is_secure=False)
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_ALLOW_ALL=True
)
def test_skip_if_not_sending_csrf_token(self):
response = self._get_response(csrf_cookie_used=False)
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_ALLOW_ALL=True
)
def test_skip_if_not_cross_domain_decorator(self):
response = self._get_response(cross_domain_decorator=False)
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_WHITELIST=['other.example.com']
)
def test_skip_if_referer_not_whitelisted(self):
response = self._get_response()
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN
)
def test_skip_if_not_cross_domain(self):
response = self._get_response(
referer="https://courses.edx.org/foo",
host="courses.edx.org"
)
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_ALLOW_ALL=True
)
def test_skip_if_no_referer(self):
response = self._get_response(delete_referer=True)
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_ALLOW_ALL=True
)
def test_skip_if_referer_not_https(self):
response = self._get_response(referer="http://www.example.com")
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_ALLOW_ALL=True
)
def test_skip_if_referer_no_protocol(self):
response = self._get_response(referer="example.com")
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ALLOW_INSECURE=True
)
def test_skip_if_no_referer_insecure(self):
response = self._get_response(delete_referer=True)
self._assert_cookie_sent(response, False)
@override_settings(
CROSS_DOMAIN_CSRF_COOKIE_NAME=COOKIE_NAME,
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN=COOKIE_DOMAIN,
CORS_ORIGIN_WHITELIST=['www.example.com']
)
def test_set_cross_domain_cookie(self):
response = self._get_response()
self._assert_cookie_sent(response, True)
def _get_response(self,
is_secure=True,
csrf_cookie_used=True,
cross_domain_decorator=True,
referer=None,
host=None,
delete_referer=False):
"""Process a request using the middleware. """
request = Mock()
request.META = {
'HTTP_REFERER': (
referer if referer is not None
else self.REFERER
)
}
request.is_secure = lambda: is_secure
if host is not None:
request.get_host = lambda: host
if delete_referer:
del request.META['HTTP_REFERER']
if csrf_cookie_used:
request.META['CSRF_COOKIE_USED'] = True
request.META['CSRF_COOKIE'] = self.COOKIE_VALUE
if cross_domain_decorator:
request.META['CROSS_DOMAIN_CSRF_COOKIE_USED'] = True
return self.middleware.process_response(request, HttpResponse())
def _assert_cookie_sent(self, response, is_set):
"""Check that the cross-domain CSRF cookie was sent. """
if is_set:
self.assertIn(self.COOKIE_NAME, response.cookies)
cookie_header = str(response.cookies[self.COOKIE_NAME])
expected = 'Set-Cookie: {name}={value}; Domain={domain};'.format(
name=self.COOKIE_NAME,
value=self.COOKIE_VALUE,
domain=self.COOKIE_DOMAIN
)
self.assertIn(expected, cookie_header)
self.assertIn('Max-Age=31449600; Path=/; secure', cookie_header)
else:
self.assertNotIn(self.COOKIE_NAME, response.cookies)
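# The assertions above pin down the contract the middleware must honour:
# send the shared cookie only for secure requests whose HTTPS referer is
# whitelisted (or CORS_ORIGIN_ALLOW_ALL is set) and whose CSRF cookie was
# actually used. A minimal sketch of that process_response logic, under
# those assumptions -- an illustration, not the edx-platform implementation:
from urlparse import urlparse  # Python 2, matching the tests above


def sketch_process_response(request, response, settings):
    """Hypothetical: attach the cross-domain CSRF cookie when safe."""
    referer = request.META.get('HTTP_REFERER')
    if not (request.is_secure() and referer):
        return response
    parsed = urlparse(referer)
    whitelisted = (
        getattr(settings, 'CORS_ORIGIN_ALLOW_ALL', False) or
        parsed.hostname in getattr(settings, 'CORS_ORIGIN_WHITELIST', [])
    )
    if parsed.scheme != 'https' or not whitelisted:
        return response
    if not (request.META.get('CSRF_COOKIE_USED') and
            request.META.get('CROSS_DOMAIN_CSRF_COOKIE_USED')):
        return response
    response.set_cookie(
        settings.CROSS_DOMAIN_CSRF_COOKIE_NAME,
        request.META['CSRF_COOKIE'],
        max_age=31449600,  # one year, matching the Max-Age assertion above
        domain=settings.CROSS_DOMAIN_CSRF_COOKIE_DOMAIN,
        secure=True,
    )
    return response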
from django.contrib import admin
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.urls import urlpatterns as auth_urlpatterns
from django.contrib.messages.api import info
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.urls import path, re_path, reverse_lazy
from django.views.decorators.cache import never_cache
from django.views.i18n import set_language
class CustomRequestAuthenticationForm(AuthenticationForm):
def __init__(self, request, *args, **kwargs):
assert isinstance(request, HttpRequest)
super().__init__(request, *args, **kwargs)
@never_cache
def remote_user_auth_view(request):
"Dummy view for remote user tests"
t = Template("Username is {{ user }}.")
c = RequestContext(request, {})
return HttpResponse(t.render(c))
def auth_processor_no_attr_access(request):
render(request, 'context_processors/auth_attrs_no_access.html')
# *After* rendering, we check whether the session was accessed
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_attr_access(request):
render(request, 'context_processors/auth_attrs_access.html')
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_user(request):
return render(request, 'context_processors/auth_attrs_user.html')
def auth_processor_perms(request):
return render(request, 'context_processors/auth_attrs_perms.html')
def auth_processor_perm_in_perms(request):
return render(request, 'context_processors/auth_attrs_perm_in_perms.html')
def auth_processor_messages(request):
info(request, "Message 1")
return render(request, 'context_processors/auth_attrs_messages.html')
def userpage(request):
pass
@permission_required('unknown.permission')
def permission_required_redirect(request):
pass
@permission_required('unknown.permission', raise_exception=True)
def permission_required_exception(request):
pass
@login_required
@permission_required('unknown.permission', raise_exception=True)
def login_and_permission_required_exception(request):
pass
uid_token = r'(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})'
# special urls for auth test cases
urlpatterns = auth_urlpatterns + [
path('logout/custom_query/', views.LogoutView.as_view(redirect_field_name='follow')),
path('logout/next_page/', views.LogoutView.as_view(next_page='/somewhere/')),
path('logout/next_page/named/', views.LogoutView.as_view(next_page='password_reset')),
path('logout/allowed_hosts/', views.LogoutView.as_view(success_url_allowed_hosts={'otherserver'})),
path('remote_user/', remote_user_auth_view),
path('password_reset_from_email/', views.PasswordResetView.as_view(from_email='staffmember@example.com')),
path(
'password_reset_extra_email_context/',
views.PasswordResetView.as_view(
extra_email_context={'greeting': 'Hello!', 'domain': 'custom.example.com'},
),
),
path(
'password_reset/custom_redirect/',
views.PasswordResetView.as_view(success_url='/custom/')),
path(
'password_reset/custom_redirect/named/',
views.PasswordResetView.as_view(success_url=reverse_lazy('password_reset'))),
path(
'password_reset/html_email_template/',
views.PasswordResetView.as_view(
html_email_template_name='registration/html_password_reset_email.html'
)),
re_path(
'^reset/custom/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(success_url='/custom/'),
),
re_path(
'^reset/custom/named/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(success_url=reverse_lazy('password_reset')),
),
re_path(
'^reset/custom/token/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(reset_url_token='set-passwordcustom'),
),
re_path(
'^reset/post_reset_login/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(post_reset_login=True),
),
re_path(
'^reset/post_reset_login_custom_backend/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(
post_reset_login=True,
post_reset_login_backend='django.contrib.auth.backends.AllowAllUsersModelBackend',
),
),
path('password_change/custom/',
views.PasswordChangeView.as_view(success_url='/custom/')),
path('password_change/custom/named/',
views.PasswordChangeView.as_view(success_url=reverse_lazy('password_reset'))),
path('login_required/', login_required(views.PasswordResetView.as_view())),
path('login_required_login_url/', login_required(views.PasswordResetView.as_view(), login_url='/somewhere/')),
path('auth_processor_no_attr_access/', auth_processor_no_attr_access),
path('auth_processor_attr_access/', auth_processor_attr_access),
path('auth_processor_user/', auth_processor_user),
path('auth_processor_perms/', auth_processor_perms),
path('auth_processor_perm_in_perms/', auth_processor_perm_in_perms),
path('auth_processor_messages/', auth_processor_messages),
path(
'custom_request_auth_login/',
views.LoginView.as_view(authentication_form=CustomRequestAuthenticationForm)),
re_path('^userpage/(.+)/$', userpage, name='userpage'),
path('login/redirect_authenticated_user_default/', views.LoginView.as_view()),
path('login/redirect_authenticated_user/',
views.LoginView.as_view(redirect_authenticated_user=True)),
path('login/allowed_hosts/',
views.LoginView.as_view(success_url_allowed_hosts={'otherserver'})),
path('permission_required_redirect/', permission_required_redirect),
path('permission_required_exception/', permission_required_exception),
path('login_and_permission_required_exception/', login_and_permission_required_exception),
path('setlang/', set_language, name='set_language'),
# This line is only required to render the password reset with is_admin=True
path('admin/', admin.site.urls),
]
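# For orientation, a couple of illustrative lookups against these patterns
# (values are hypothetical):
#
#   from django.urls import reverse
#   reverse('userpage', args=['alice'])   # -> '/userpage/alice/'
#   reverse('set_language')               # -> '/setlang/'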
"""
Test the partitions and partitions service
"""
from collections import defaultdict
from unittest import TestCase
from mock import Mock
from xmodule.partitions.partitions import Group, UserPartition
from xmodule.partitions.partitions_service import PartitionService
class TestGroup(TestCase):
"""Test constructing groups"""
def test_construct(self):
test_id = 10
name = "Grendel"
group = Group(test_id, name)
self.assertEqual(group.id, test_id)
self.assertEqual(group.name, name)
def test_string_id(self):
test_id = "10"
name = "Grendel"
group = Group(test_id, name)
self.assertEqual(group.id, 10)
def test_to_json(self):
test_id = 10
name = "Grendel"
group = Group(test_id, name)
jsonified = group.to_json()
act_jsonified = {
"id": test_id,
"name": name,
"version": group.VERSION
}
self.assertEqual(jsonified, act_jsonified)
def test_from_json(self):
test_id = 5
name = "Grendel"
jsonified = {
"id": test_id,
"name": name,
"version": Group.VERSION
}
group = Group.from_json(jsonified)
self.assertEqual(group.id, test_id)
self.assertEqual(group.name, name)
def test_from_json_broken(self):
test_id = 5
name = "Grendel"
# Bad version
jsonified = {
"id": test_id,
"name": name,
"version": 9001
}
with self.assertRaisesRegexp(TypeError, "has unexpected version"):
group = Group.from_json(jsonified)
# Missing key "id"
jsonified = {
"name": name,
"version": Group.VERSION
}
with self.assertRaisesRegexp(TypeError, "missing value key 'id'"):
group = Group.from_json(jsonified)
# Has extra key - should not be a problem
jsonified = {
"id": test_id,
"name": name,
"version": Group.VERSION,
"programmer": "Cale"
}
group = Group.from_json(jsonified)
self.assertNotIn("programmer", group.to_json())
class TestUserPartition(TestCase):
"""Test constructing UserPartitions"""
def test_construct(self):
groups = [Group(0, 'Group 1'), Group(1, 'Group 2')]
user_partition = UserPartition(0, 'Test Partition', 'for testing purposes', groups)
self.assertEqual(user_partition.id, 0)
self.assertEqual(user_partition.name, "Test Partition")
self.assertEqual(user_partition.description, "for testing purposes")
self.assertEqual(user_partition.groups, groups)
def test_string_id(self):
groups = [Group(0, 'Group 1'), Group(1, 'Group 2')]
user_partition = UserPartition("70", 'Test Partition', 'for testing purposes', groups)
self.assertEqual(user_partition.id, 70)
def test_to_json(self):
groups = [Group(0, 'Group 1'), Group(1, 'Group 2')]
upid = 0
upname = "Test Partition"
updesc = "for testing purposes"
user_partition = UserPartition(upid, upname, updesc, groups)
jsonified = user_partition.to_json()
act_jsonified = {
"id": upid,
"name": upname,
"description": updesc,
"groups": [group.to_json() for group in groups],
"version": user_partition.VERSION
}
self.assertEqual(jsonified, act_jsonified)
def test_from_json(self):
groups = [Group(0, 'Group 1'), Group(1, 'Group 2')]
upid = 1
upname = "Test Partition"
updesc = "For Testing Purposes"
jsonified = {
"id": upid,
"name": upname,
"description": updesc,
"groups": [group.to_json() for group in groups],
"version": UserPartition.VERSION
}
user_partition = UserPartition.from_json(jsonified)
self.assertEqual(user_partition.id, upid)
self.assertEqual(user_partition.name, upname)
self.assertEqual(user_partition.description, updesc)
for act_group in user_partition.groups:
self.assertIn(act_group.id, [0, 1])
exp_group = groups[act_group.id]
self.assertEqual(exp_group.id, act_group.id)
self.assertEqual(exp_group.name, act_group.name)
def test_from_json_broken(self):
groups = [Group(0, 'Group 1'), Group(1, 'Group 2')]
upid = 1
upname = "Test Partition"
updesc = "For Testing Purposes"
# Missing field
jsonified = {
"name": upname,
"description": updesc,
"groups": [group.to_json() for group in groups],
"version": UserPartition.VERSION
}
with self.assertRaisesRegexp(TypeError, "missing value key 'id'"):
user_partition = UserPartition.from_json(jsonified)
# Wrong version (it's over 9000!)
jsonified = {
'id': upid,
"name": upname,
"description": updesc,
"groups": [group.to_json() for group in groups],
"version": 9001
}
with self.assertRaisesRegexp(TypeError, "has unexpected version"):
user_partition = UserPartition.from_json(jsonified)
# Has extra key - should not be a problem
jsonified = {
'id': upid,
"name": upname,
"description": updesc,
"groups": [group.to_json() for group in groups],
"version": UserPartition.VERSION,
"programmer": "Cale"
}
user_partition = UserPartition.from_json(jsonified)
self.assertNotIn("programmer", user_partition.to_json())
class StaticPartitionService(PartitionService):
"""
Mock PartitionService for testing.
"""
def __init__(self, partitions, **kwargs):
super(StaticPartitionService, self).__init__(**kwargs)
self._partitions = partitions
@property
def course_partitions(self):
return self._partitions
class MemoryUserTagsService(object):
"""
An implementation of a user_tags XBlock service that
uses an in-memory dictionary for storage
"""
COURSE_SCOPE = 'course'
def __init__(self):
self._tags = defaultdict(dict)
    def get_tag(self, scope, key):
        """Gets the value of ``key`` in the given ``scope``"""
        print 'GETTING', scope, key, self._tags
        return self._tags[scope].get(key)

    def set_tag(self, scope, key, value):
        """Sets the value of ``key`` to ``value`` in the given ``scope``"""
        self._tags[scope][key] = value
        print 'SET', scope, key, value, self._tags
class TestPartitionsService(TestCase):
"""
Test getting a user's group out of a partition
"""
def setUp(self):
groups = [Group(0, 'Group 1'), Group(1, 'Group 2')]
self.partition_id = 0
self.user_tags_service = MemoryUserTagsService()
user_partition = UserPartition(self.partition_id, 'Test Partition', 'for testing purposes', groups)
self.partitions_service = StaticPartitionService(
[user_partition],
user_tags_service=self.user_tags_service,
course_id=Mock(),
track_function=Mock()
)
def test_get_user_group_for_partition(self):
# get a group assigned to the user
group1 = self.partitions_service.get_user_group_for_partition(self.partition_id)
# make sure we get the same group back out if we try a second time
group2 = self.partitions_service.get_user_group_for_partition(self.partition_id)
self.assertEqual(group1, group2)
# test that we error if given an invalid partition id
with self.assertRaises(ValueError):
self.partitions_service.get_user_group_for_partition(3)
def test_user_in_deleted_group(self):
# get a group assigned to the user - should be group 0 or 1
old_group = self.partitions_service.get_user_group_for_partition(self.partition_id)
self.assertIn(old_group, [0, 1])
# Change the group definitions! No more group 0 or 1
groups = [Group(3, 'Group 3'), Group(4, 'Group 4')]
user_partition = UserPartition(self.partition_id, 'Test Partition', 'for testing purposes', groups)
self.partitions_service = StaticPartitionService(
[user_partition],
user_tags_service=self.user_tags_service,
course_id=Mock(),
track_function=Mock()
)
# Now, get a new group using the same call - should be 3 or 4
new_group = self.partitions_service.get_user_group_for_partition(self.partition_id)
self.assertIn(new_group, [3, 4])
# We should get the same group over multiple calls
new_group_2 = self.partitions_service.get_user_group_for_partition(self.partition_id)
self.assertEqual(new_group, new_group_2)
def test_change_group_name(self):
# Changing the name of the group shouldn't affect anything
# get a group assigned to the user - should be group 0 or 1
old_group = self.partitions_service.get_user_group_for_partition(self.partition_id)
self.assertIn(old_group, [0, 1])
# Change the group names
groups = [Group(0, 'Group 0'), Group(1, 'Group 1')]
user_partition = UserPartition(self.partition_id, 'Test Partition', 'for testing purposes', groups)
self.partitions_service = StaticPartitionService(
[user_partition],
user_tags_service=self.user_tags_service,
course_id=Mock(),
track_function=Mock()
)
# Now, get a new group using the same call
new_group = self.partitions_service.get_user_group_for_partition(self.partition_id)
self.assertEqual(old_group, new_group)
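# The three tests above pin down the persistence contract: the first call
# assigns a group at random and records it via the user_tags service; later
# calls reuse the stored id while it still names an existing group, and
# reassign when the groups have changed. A minimal sketch of that logic,
# with a hypothetical tag key (not the real PartitionService internals):
import random

def sketch_get_user_group(partition, user_tags_service):
    """Hypothetical: return a sticky, tag-backed group id for `partition`."""
    key = 'sketch.partition_service.partition_{0}'.format(partition.id)
    group_ids = [group.id for group in partition.groups]
    stored = user_tags_service.get_tag(user_tags_service.COURSE_SCOPE, key)
    if stored is not None and int(stored) in group_ids:
        return int(stored)
    # Stored group missing or never assigned: pick anew and persist it.
    new_group = random.choice(group_ids)
    user_tags_service.set_tag(user_tags_service.COURSE_SCOPE, key, new_group)
    return new_group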
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Show photo switch',
'version': '0.1',
'category': 'Product',
'description': '''
Parameter for user to show photo in product form
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product',
'quotation_photo',
],
'init_xml': [],
'demo': [],
'data': [
'photo_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_networkprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of NetworkProfile Avi RESTful Object
description:
- This module is used to configure a NetworkProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
name:
description:
- The name of the network profile.
required: true
profile:
description:
- NetworkProfileUnion settings for the network profile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the network profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a network profile for a UDP application
avi_networkprofile:
controller: ''
username: ''
password: ''
name: System-UDP-Fast-Path
profile:
type: PROTOCOL_TYPE_UDP_FAST_PATH
udp_fast_path_profile:
per_pkt_loadbalance: false
session_idle_timeout: 10
snat: true
tenant_ref: admin
'''
RETURN = '''
obj:
description: NetworkProfile (api/networkprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
name=dict(type='str', required=True),
profile=dict(type='dict', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'networkprofile',
set([]))
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.core import checks
def check_user_model(**kwargs):
errors = []
cls = apps.get_model(settings.AUTH_USER_MODEL)
# Check that REQUIRED_FIELDS is a list
if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)):
errors.append(
checks.Error(
"'REQUIRED_FIELDS' must be a list or tuple.",
hint=None,
obj=cls,
id='auth.E001',
)
)
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
errors.append(
checks.Error(
("The field named as the 'USERNAME_FIELD' "
"for a custom user model must not be included in 'REQUIRED_FIELDS'."),
hint=None,
obj=cls,
id='auth.E002',
)
)
# Check that the username field is unique
if not cls._meta.get_field(cls.USERNAME_FIELD).unique:
if (settings.AUTHENTICATION_BACKENDS ==
['django.contrib.auth.backends.ModelBackend']):
errors.append(
checks.Error(
"'%s.%s' must be unique because it is named as the 'USERNAME_FIELD'." % (
cls._meta.object_name, cls.USERNAME_FIELD
),
hint=None,
obj=cls,
id='auth.E003',
)
)
else:
errors.append(
checks.Warning(
"'%s.%s' is named as the 'USERNAME_FIELD', but it is not unique." % (
cls._meta.object_name, cls.USERNAME_FIELD
),
hint=('Ensure that your authentication backend(s) can handle '
'non-unique usernames.'),
obj=cls,
id='auth.W004',
)
)
return errors
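# check_user_model only runs once it is registered with Django's system
# check framework; a minimal sketch of that wiring (the 'models' tag is
# illustrative -- Django registers its own auth checks during app loading):
#
#   checks.register(check_user_model, 'models')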
import logging
from lxml import etree
from regparser.grammar.unified import notice_cfr_p
from regparser.notice.amendments.fetch import fetch_amendments
from regparser.notice.dates import fetch_dates
from regparser.notice.sxs import (build_section_by_section,
find_section_by_section)
from regparser.notice.util import spaces_then_remove, swap_emphasis_tags
from regparser.notice.xml import xmls_for_url
logger = logging.getLogger(__name__)
def build_notice(cfr_title, cfr_part, fr_notice, fetch_xml=True,
xml_to_process=None):
"""Given JSON from the federal register, create our notice structure"""
cfr_parts = {str(ref['part']) for ref in fr_notice['cfr_references']}
if cfr_part:
cfr_parts.add(cfr_part)
notice = {'cfr_title': cfr_title, 'cfr_parts': list(cfr_parts)}
# Copy over most fields
for field in ['comments_close_on', 'document_number', 'publication_date',
'regulation_id_numbers']:
if fr_notice[field]:
notice[field] = fr_notice[field]
if fr_notice['effective_on']:
notice['effective_on'] = fr_notice['effective_on']
notice['initial_effective_on'] = fr_notice['effective_on']
if fr_notice['html_url']:
notice['fr_url'] = fr_notice['html_url']
if fr_notice['citation']:
notice['fr_citation'] = fr_notice['citation']
notice['fr_volume'] = fr_notice['volume']
notice['meta'] = {}
for key in ('dates', 'end_page', 'start_page', 'type'):
notice['meta'][key] = fr_notice[key]
if xml_to_process is not None:
return [process_xml(notice, xml_to_process)]
elif fr_notice['full_text_xml_url'] and fetch_xml:
xmls = xmls_for_url(fr_notice['full_text_xml_url'])
notices = [process_xml(notice, xml) for xml in xmls]
set_document_numbers(notices)
return notices
return [notice]
def split_doc_num(doc_num, effective_date):
""" If we have a split notice, we construct a document number
based on the original document number and the effective date. """
effective_date = ''.join(effective_date.split('-'))
return '{0}_{1}'.format(doc_num, effective_date)
def set_document_numbers(notices):
"""If we have multiple notices (due to being split across multiple
effective dates,) we need to fix their document numbers."""
if len(notices) > 1:
for notice in notices:
notice['document_number'] = split_doc_num(
notice['document_number'], notice['effective_on'])
return notices
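# For example (document number and date are illustrative):
#
#   split_doc_num('2016-12345', '2016-05-04')  # -> '2016-12345_20160504'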
def process_sxs(notice, notice_xml):
""" Find and build SXS from the notice_xml. """
sxs = find_section_by_section(notice_xml)
    # Note: we will continue to use cfr_parts[0] as the default SxS label
    # until we find a counterexample
sxs = build_section_by_section(sxs, notice['meta']['start_page'],
notice['cfr_parts'][0])
notice['section_by_section'] = sxs
# @todo - this can be deleted once we remove process_xml
def fetch_cfr_parts(notice_xml):
""" Sometimes we need to read the CFR part numbers from the notice
XML itself. This would need to happen when we've broken up a
multiple-effective-date notice that has multiple CFR parts that
may not be included in each date. """
parts = []
for cfr_elm in notice_xml.xpath('//CFR'):
parts.extend(notice_cfr_p.parseString(cfr_elm.text).cfr_parts)
return list(sorted(set(parts)))
def process_xml(notice, notice_xml):
"""Pull out relevant fields from the xml and add them to the notice"""
notice = dict(notice) # defensive copy
if not notice.get('effective_on'):
dates = fetch_dates(notice_xml)
if dates and 'effective' in dates:
notice['effective_on'] = dates['effective'][0]
if not notice.get('cfr_parts'):
cfr_parts = fetch_cfr_parts(notice_xml)
notice['cfr_parts'] = cfr_parts
process_sxs(notice, notice_xml)
amds = fetch_amendments(notice_xml)
if amds:
notice['amendments'] = amds
add_footnotes(notice, notice_xml)
return notice
def add_footnotes(notice, notice_xml):
""" Parse the notice xml for footnotes and add them to the notice. """
notice['footnotes'] = {}
for child in notice_xml.xpath('//FTNT/*'):
spaces_then_remove(child, 'PRTPAGE')
swap_emphasis_tags(child)
ref = child.xpath('.//SU')
if ref:
child.text = ref[0].tail
child.remove(ref[0])
content = child.text
for cc in child:
content += etree.tounicode(cc)
if child.tail:
content += child.tail
notice['footnotes'][ref[0].text] = content.strip()
async def asyncgen():
yield 10
async def run():
{i async for i in asyncgen()}
[i async for i in asyncgen()]
{i: i ** 2 async for i in asyncgen()}
(i ** 2 async for i in asyncgen())
list(i async for i in asyncgen())
dataset = {data for line in gen()
async for data in line
if check(data)}
dataset = {data async for line in asyncgen()
async for data in line
if check(data)}
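# None of the comprehensions above execute on their own; they need an event
# loop. A minimal driver, assuming Python 3.7+ for asyncio.run (only the
# asyncgen()-based forms are exercised, since gen() and check() above are
# undefined placeholders):
import asyncio

async def demo():
    squares = {i: i ** 2 async for i in asyncgen()}  # {10: 100}
    values = [i async for i in asyncgen()]           # [10]
    return squares, values

if __name__ == '__main__':
    print(asyncio.run(demo()))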
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import numpy as np
import random
import unittest
import logging
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.log_helper import get_logger
from test_imperative_qat import TestImperativeQat
paddle.enable_static()
os.environ["CPU_NUM"] = "1"
if core.is_compiled_with_cuda():
fluid.set_flags({"FLAGS_cudnn_deterministic": True})
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class TestImperativeQatChannelWise(TestImperativeQat):
def set_vars(self):
self.weight_quantize_type = 'channel_wise_abs_max'
self.activation_quantize_type = 'moving_average_abs_max'
print('weight_quantize_type', self.weight_quantize_type)
if __name__ == '__main__':
unittest.main()
"""SCons.Tool.wix
Tool-specific initialization for wix, the Windows Installer XML Tool.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/wix.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.Builder
import SCons.Action
import os
def generate(env):
"""Add Builders and construction variables for WiX to an Environment."""
if not exists(env):
return
env['WIXCANDLEFLAGS'] = ['-nologo']
env['WIXCANDLEINCLUDE'] = []
env['WIXCANDLECOM'] = '$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}'
env['WIXLIGHTFLAGS'].append( '-nologo' )
env['WIXLIGHTCOM'] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}"
env['WIXSRCSUF'] = '.wxs'
env['WIXOBJSUF'] = '.wixobj'
object_builder = SCons.Builder.Builder(
action = '$WIXCANDLECOM',
suffix = '$WIXOBJSUF',
src_suffix = '$WIXSRCSUF')
linker_builder = SCons.Builder.Builder(
action = '$WIXLIGHTCOM',
src_suffix = '$WIXOBJSUF',
src_builder = object_builder)
env['BUILDERS']['WiX'] = linker_builder
def exists(env):
env['WIXCANDLE'] = 'candle.exe'
env['WIXLIGHT'] = 'light.exe'
# try to find the candle.exe and light.exe tools and
# add the install directory to light libpath.
for path in os.environ['PATH'].split(os.pathsep):
if not path:
continue
# workaround for some weird python win32 bug.
if path[0] == '"' and path[-1:]=='"':
path = path[1:-1]
# normalize the path
path = os.path.normpath(path)
# search for the tools in the PATH environment variable
try:
files = os.listdir(path)
if env['WIXCANDLE'] in files and env['WIXLIGHT'] in files:
env.PrependENVPath('PATH', path)
# include appropriate flags if running WiX 2.0
if 'wixui.wixlib' in files and 'WixUI_en-us.wxl' in files:
env['WIXLIGHTFLAGS'] = [ os.path.join( path, 'wixui.wixlib' ),
'-loc',
os.path.join( path, 'WixUI_en-us.wxl' ) ]
else:
env['WIXLIGHTFLAGS'] = []
return 1
except OSError:
pass # ignore this, could be a stale PATH entry.
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
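# A minimal SConstruct sketch consuming this tool (file names are
# hypothetical; the tool only activates when exists() finds candle.exe and
# light.exe on PATH):
#
#   env = Environment(tools=['default', 'wix'])
#   env.WiX('installer.msi', ['product.wxs'])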
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
#
import bpy
from bpy.types import Menu, Panel
from rna_prop_ui import PropertyPanel
class LAMP_MT_sunsky_presets(Menu):
bl_label = "Sun & Sky Presets"
preset_subdir = "sunsky"
preset_operator = "script.execute_preset"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
draw = Menu.draw_preset
class DataButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
engine = context.scene.render.engine
return context.lamp and (engine in cls.COMPAT_ENGINES)
class DATA_PT_context_lamp(DataButtonsPanel, Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
ob = context.object
lamp = context.lamp
space = context.space_data
split = layout.split(percentage=0.65)
texture_count = len(lamp.texture_slots.keys())
if ob:
split.template_ID(ob, "data")
elif lamp:
split.template_ID(space, "pin_id")
if texture_count != 0:
split.label(text=str(texture_count), icon='TEXTURE')
class DATA_PT_preview(DataButtonsPanel, Panel):
bl_label = "Preview"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
self.layout.template_preview(context.lamp)
class DATA_PT_lamp(DataButtonsPanel, Panel):
bl_label = "Lamp"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
lamp = context.lamp
layout.prop(lamp, "type", expand=True)
split = layout.split()
col = split.column()
sub = col.column()
sub.prop(lamp, "color", text="")
sub.prop(lamp, "energy")
if lamp.type in {'POINT', 'SPOT'}:
sub.label(text="Falloff:")
sub.prop(lamp, "falloff_type", text="")
sub.prop(lamp, "distance")
if lamp.falloff_type == 'LINEAR_QUADRATIC_WEIGHTED':
col.label(text="Attenuation Factors:")
sub = col.column(align=True)
sub.prop(lamp, "linear_attenuation", slider=True, text="Linear")
sub.prop(lamp, "quadratic_attenuation", slider=True, text="Quadratic")
col.prop(lamp, "use_sphere")
if lamp.type == 'AREA':
col.prop(lamp, "distance")
col.prop(lamp, "gamma")
col = split.column()
col.prop(lamp, "use_negative")
col.prop(lamp, "use_own_layer", text="This Layer Only")
col.prop(lamp, "use_specular")
col.prop(lamp, "use_diffuse")
class DATA_PT_sunsky(DataButtonsPanel, Panel):
bl_label = "Sky & Atmosphere"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type == 'SUN') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp.sky
row = layout.row(align=True)
row.prop(lamp, "use_sky")
row.menu("LAMP_MT_sunsky_presets", text=bpy.types.LAMP_MT_sunsky_presets.bl_label)
row.operator("lamp.sunsky_preset_add", text="", icon='ZOOMIN')
row.operator("lamp.sunsky_preset_add", text="", icon='ZOOMOUT').remove_active = True
row = layout.row()
row.active = lamp.use_sky or lamp.use_atmosphere
row.prop(lamp, "atmosphere_turbidity", text="Turbidity")
split = layout.split()
col = split.column()
col.active = lamp.use_sky
col.label(text="Blending:")
sub = col.column()
sub.prop(lamp, "sky_blend_type", text="")
sub.prop(lamp, "sky_blend", text="Factor")
col.label(text="Color Space:")
sub = col.column()
sub.row().prop(lamp, "sky_color_space", expand=True)
sub.prop(lamp, "sky_exposure", text="Exposure")
col = split.column()
col.active = lamp.use_sky
col.label(text="Horizon:")
sub = col.column()
sub.prop(lamp, "horizon_brightness", text="Brightness")
sub.prop(lamp, "spread", text="Spread")
col.label(text="Sun:")
sub = col.column()
sub.prop(lamp, "sun_brightness", text="Brightness")
sub.prop(lamp, "sun_size", text="Size")
sub.prop(lamp, "backscattered_light", slider=True, text="Back Light")
layout.separator()
layout.prop(lamp, "use_atmosphere")
split = layout.split()
col = split.column()
col.active = lamp.use_atmosphere
col.label(text="Intensity:")
col.prop(lamp, "sun_intensity", text="Sun")
col.prop(lamp, "atmosphere_distance_factor", text="Distance")
col = split.column()
col.active = lamp.use_atmosphere
col.label(text="Scattering:")
sub = col.column(align=True)
sub.prop(lamp, "atmosphere_inscattering", slider=True, text="Inscattering")
sub.prop(lamp, "atmosphere_extinction", slider=True, text="Extinction")
class DATA_PT_shadow(DataButtonsPanel, Panel):
bl_label = "Shadow"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type in {'POINT', 'SUN', 'SPOT', 'AREA'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp
layout.prop(lamp, "shadow_method", expand=True)
if lamp.shadow_method == 'NOSHADOW' and lamp.type == 'AREA':
split = layout.split()
col = split.column()
col.label(text="Form factor sampling:")
sub = col.row(align=True)
if lamp.shape == 'SQUARE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples")
elif lamp.shape == 'RECTANGLE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples X")
sub.prop(lamp, "shadow_ray_samples_y", text="Samples Y")
if lamp.shadow_method != 'NOSHADOW':
split = layout.split()
col = split.column()
col.prop(lamp, "shadow_color", text="")
col = split.column()
col.prop(lamp, "use_shadow_layer", text="This Layer Only")
col.prop(lamp, "use_only_shadow")
if lamp.shadow_method == 'RAY_SHADOW':
split = layout.split()
col = split.column()
col.label(text="Sampling:")
if lamp.type in {'POINT', 'SUN', 'SPOT'}:
sub = col.row()
sub.prop(lamp, "shadow_ray_samples", text="Samples")
sub.prop(lamp, "shadow_soft_size", text="Soft Size")
elif lamp.type == 'AREA':
sub = col.row(align=True)
if lamp.shape == 'SQUARE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples")
elif lamp.shape == 'RECTANGLE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples X")
sub.prop(lamp, "shadow_ray_samples_y", text="Samples Y")
col.row().prop(lamp, "shadow_ray_sample_method", expand=True)
if lamp.shadow_ray_sample_method == 'ADAPTIVE_QMC':
layout.prop(lamp, "shadow_adaptive_threshold", text="Threshold")
if lamp.type == 'AREA' and lamp.shadow_ray_sample_method == 'CONSTANT_JITTERED':
row = layout.row()
row.prop(lamp, "use_umbra")
row.prop(lamp, "use_dither")
row.prop(lamp, "use_jitter")
elif lamp.shadow_method == 'BUFFER_SHADOW':
col = layout.column()
col.label(text="Buffer Type:")
col.row().prop(lamp, "shadow_buffer_type", expand=True)
if lamp.shadow_buffer_type in {'REGULAR', 'HALFWAY', 'DEEP'}:
split = layout.split()
col = split.column()
col.label(text="Filter Type:")
col.prop(lamp, "shadow_filter_type", text="")
sub = col.column(align=True)
sub.prop(lamp, "shadow_buffer_soft", text="Soft")
sub.prop(lamp, "shadow_buffer_bias", text="Bias")
col = split.column()
col.label(text="Sample Buffers:")
col.prop(lamp, "shadow_sample_buffers", text="")
sub = col.column(align=True)
sub.prop(lamp, "shadow_buffer_size", text="Size")
sub.prop(lamp, "shadow_buffer_samples", text="Samples")
if lamp.shadow_buffer_type == 'DEEP':
col.prop(lamp, "compression_threshold")
elif lamp.shadow_buffer_type == 'IRREGULAR':
layout.prop(lamp, "shadow_buffer_bias", text="Bias")
split = layout.split()
col = split.column()
col.prop(lamp, "use_auto_clip_start", text="Autoclip Start")
sub = col.column()
sub.active = not lamp.use_auto_clip_start
sub.prop(lamp, "shadow_buffer_clip_start", text="Clip Start")
col = split.column()
col.prop(lamp, "use_auto_clip_end", text="Autoclip End")
sub = col.column()
sub.active = not lamp.use_auto_clip_end
sub.prop(lamp, "shadow_buffer_clip_end", text=" Clip End")
class DATA_PT_area(DataButtonsPanel, Panel):
bl_label = "Area Shape"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type == 'AREA') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp
col = layout.column()
col.row().prop(lamp, "shape", expand=True)
sub = col.row(align=True)
if lamp.shape == 'SQUARE':
sub.prop(lamp, "size")
elif lamp.shape == 'RECTANGLE':
sub.prop(lamp, "size", text="Size X")
sub.prop(lamp, "size_y", text="Size Y")
class DATA_PT_spot(DataButtonsPanel, Panel):
bl_label = "Spot Shape"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type == 'SPOT') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp
split = layout.split()
col = split.column()
sub = col.column()
sub.prop(lamp, "spot_size", text="Size")
sub.prop(lamp, "spot_blend", text="Blend", slider=True)
col.prop(lamp, "use_square")
col.prop(lamp, "show_cone")
col = split.column()
col.active = (lamp.shadow_method != 'BUFFER_SHADOW' or lamp.shadow_buffer_type != 'DEEP')
col.prop(lamp, "use_halo")
sub = col.column(align=True)
sub.active = lamp.use_halo
sub.prop(lamp, "halo_intensity", text="Intensity")
if lamp.shadow_method == 'BUFFER_SHADOW':
sub.prop(lamp, "halo_step", text="Step")
class DATA_PT_falloff_curve(DataButtonsPanel, Panel):
bl_label = "Falloff Curve"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type in {'POINT', 'SPOT'} and lamp.falloff_type == 'CUSTOM_CURVE') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
lamp = context.lamp
self.layout.template_curve_mapping(lamp, "falloff_curve", use_negative_slope=True)
class DATA_PT_custom_props_lamp(DataButtonsPanel, PropertyPanel, Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
_context_path = "object.data"
_property_type = bpy.types.Lamp
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the deriveaddresses rpc call."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import assert_equal, assert_raises_rpc_error
class DeriveaddressesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
assert_raises_rpc_error(-5, "Missing checksum", self.nodes[0].deriveaddresses, "a")
descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)#t6wfjs64"
address = "bcrt1qjqmxmkpmxt80xz4y3746zgt0q3u3ferr34acd5"
assert_equal(self.nodes[0].deriveaddresses(descriptor), [address])
descriptor = descriptor[:-9]
assert_raises_rpc_error(-5, "Missing checksum", self.nodes[0].deriveaddresses, descriptor)
descriptor_pubkey = "wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)#s9ga3alw"
address = "bcrt1qjqmxmkpmxt80xz4y3746zgt0q3u3ferr34acd5"
assert_equal(self.nodes[0].deriveaddresses(descriptor_pubkey), [address])
ranged_descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)#kft60nuy"
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, [1, 2]), ["bcrt1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rq4442dy", "bcrt1qpgptk2gvshyl0s9lqshsmx932l9ccsv265tvaq"])
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, 2), [address, "bcrt1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rq4442dy", "bcrt1qpgptk2gvshyl0s9lqshsmx932l9ccsv265tvaq"])
assert_raises_rpc_error(-8, "Range should not be specified for an un-ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"), [0, 2])
assert_raises_rpc_error(-8, "Range must be specified for a ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"))
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), 10000000000)
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [1000000000, 2000000000])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [2, 0])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [-1, 0])
combo_descriptor = descsum_create("combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)")
assert_equal(self.nodes[0].deriveaddresses(combo_descriptor), ["mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", "mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", address, "2NDvEwGfpEqJWfybzpKPHF2XH3jwoQV3D7x"])
hardened_without_privkey_descriptor = descsum_create("wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1'/1/0)")
assert_raises_rpc_error(-5, "Cannot derive script without private keys", self.nodes[0].deriveaddresses, hardened_without_privkey_descriptor)
bare_multisig_descriptor = descsum_create("multi(1,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)")
assert_raises_rpc_error(-5, "Descriptor does not have a corresponding address", self.nodes[0].deriveaddresses, bare_multisig_descriptor)
if __name__ == '__main__':
DeriveaddressesTest().main()
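# The same RPC is available from the command line; for example (regtest,
# descriptor abbreviated here for readability -- a checksum suffix is
# mandatory, as the "Missing checksum" assertions above show):
#
#   bitcoin-cli deriveaddresses "wpkh(tprv8Zg.../1/1/*)#kft60nuy" "[0,2]"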
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
if 'all' in gyp.debug or mode in gyp.debug:
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
if args:
message %= args
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False,
circular_check=True):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
'generator_filelist_paths':
getattr(generator, 'generator_filelist_paths', None),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, circular_check,
params['parallel'], params['root_targets'])
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
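# For example (illustrative input):
#
#   NameValueListToDict(['OS=linux', 'jobs=4', 'component'])
#   # -> {'OS': 'linux', 'jobs': 4, 'component': True}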
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
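# For example, with use_environment enabled, GYP_DEFINES='OS=linux' in the
# environment and -Dfoo=1 on the command line, this yields (illustrative;
# an identity function stands in for the predicate):
#
#   RegenerateAppendFlag('-D', ['foo=1'], lambda v: v, 'GYP_DEFINES', options)
#   # -> ['-DOS=linux', '-Dfoo=1']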
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = FixPath if metadata['type'] == 'path' else Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
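# Illustrative (hypothetical) round trip: for options parsed from
#   -D OS=linux --depth=src --check
# RegenerateFlags would return something like
#   ['--ignore-environment', '-DOS=linux', '--depth=.', '--check']
# i.e. a command line that rebuilds the same options even with the
# environment cleared (path options are re-expressed relative to depth,
# so depth itself becomes '.').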
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of the environment variable from which additional values
for this option come.
type: type='path' tells the regenerator that the values of this option
need to be made relative to options.depth.
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating; for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
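# Minimal usage sketch (hypothetical, mirroring gyp_main below):
#   parser = RegeneratableOptionParser()
#   parser.add_option('-D', dest='defines', action='append',
#                     env_name='GYP_DEFINES')
#   values, args = parser.parse_args(['-D', 'OS=linux'])
#   values._regeneration_metadata['defines']['env_name']  # -> 'GYP_DEFINES'
# The recorded metadata is exactly what RegenerateFlags consumes above.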
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--config-dir', dest='config_dir', action='store',
env_name='GYP_CONFIG_DIR', default=None,
help='The location for configuration files like '
'include.gypi.')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
parser.add_option('--no-parallel', action='store_true', default=False,
help='Disable multiprocessing')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('-R', '--root-target', dest='root_targets',
action='append', metavar='TARGET',
help='include only TARGET and its deep dependencies')
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
# Set up the configuration directory (defaults to ~/.gyp)
if not options.config_dir:
home = None
home_dot_gyp = None
if options.use_environment:
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home is not None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
else:
home_dot_gyp = os.path.expanduser(options.config_dir)
if home_dot_gyp and not os.path.exists(home_dot_gyp):
home_dot_gyp = None
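# e.g. (hypothetical) GYP_CONFIG_DIR='~/my-gyp' is expanded with expanduser;
# with nothing set, ~/.gyp is used when it exists, otherwise home_dot_gyp
# stays None.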
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split(r'[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
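# For example, GYP_GENERATORS='ninja,make' (hypothetical value) yields
# options.formats == ['ninja', 'make']; with nothing set, the platform
# default above is used.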
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
options.parallel = not options.no_parallel
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
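# e.g. invoking with '-d variables -d general' (hypothetical) sets
# gyp.debug == {'variables': 1, 'general': 1}, which enables the option
# dump above.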
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
absolute path to each build file's directory. The deepest "src" component
found (searching upward from each build file) will be treated as though it
were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise GypError('Could not automatically locate src directory. This is '
'a temporary Chromium feature that will be removed. Use '
'--depth as a workaround.')
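# For example (hypothetical path): a build file at
# /work/chromium/src/base/base.gyp yields options.depth ==
# '/work/chromium/src', the deepest 'src' component on that path.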
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines: