from abc import ABCMeta, abstractmethod
import itertools
from strongsup.parse_case import ParseCase
from strongsup.exploration_policy import Beam
from strongsup.rlong.predicate import RLongPredicate
from strongsup.rlong.state import RLongAlchemyObject
from strongsup.rlong.world import RLongAlchemyWorld
################################
# Alchemy
# TODO: Refactor things common to other domains
class AlchemyOracleExplorationPolicy(object):
def infer_paths(self, ex):
return AlchemyOraclePathFinder(ex).all_actual_paths
class AlchemyOraclePathFinder(object):
def __init__(self, ex, debug=False):
self.context = ex.context
self.world = ex.context.world
self.initial_state = self.world.initial_state
self.final_state = ex.answer[0].state
self.num_steps = len(ex.context.utterances)
self.coarse_paths = []
self.find_coarse_paths(self.initial_state, [])
self.all_actual_paths = []
for coarse_path in self.coarse_paths:
self.actual_paths = []
self.find_actual_paths(coarse_path, None, 0)
if debug:
print('-' * 10, [item[1] for item in coarse_path], '-' * 10)
for path in self.actual_paths:
print(' ', path.decisions)
self.all_actual_paths.extend(self.actual_paths)
def find_coarse_paths(self, current_state, path_so_far):
"""Populate self.coarse_paths with coarse paths.
A coarse path is just a list of commands (actions + arguments)
"""
if len(path_so_far) == self.num_steps:
if current_state == self.final_state:
self.coarse_paths.append(path_so_far[:])
return
# Try Pour
for i in range(len(current_state)):
for j in range(len(current_state)):
try:
new_state, command = current_state.apply_action(
'Pour', [current_state[i], current_state[j]])
                except Exception:
continue
path_so_far.append((current_state, command, new_state))
self.find_coarse_paths(new_state, path_so_far)
path_so_far.pop()
# Try Mix
for i in range(len(current_state)):
try:
new_state, command = current_state.apply_action(
'Mix', [current_state[i]])
            except Exception:
continue
path_so_far.append((current_state, command, new_state))
self.find_coarse_paths(new_state, path_so_far)
path_so_far.pop()
# Try Drain
for i in range(len(current_state)):
for j in range(1, current_state[i].amount + 1):
try:
new_state, command = current_state.apply_action(
'Drain', [current_state[i], j])
                except Exception:
continue
path_so_far.append((current_state, command, new_state))
self.find_coarse_paths(new_state, path_so_far)
path_so_far.pop()
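    # (Illustrative sketch, not part of the original module.) For a
    # two-utterance example, a coarse path found above might look like:
    #     [(state0, ('Pour', beaker_i, beaker_j), state1),
    #      (state1, ('Mix', beaker_j), state2)]
    # i.e., each step records the state before, the grounded command
    # returned by apply_action, and the state after.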
def find_actual_paths(self, coarse_path, current_parse_case, current_step):
"""Populate self.actual_paths with actual logical forms."""
if current_step == self.num_steps:
# Finish up the logical form
assert current_parse_case is not None
assert (not isinstance(current_parse_case.denotation, Exception)
and current_parse_case.denotation.world_state == self.final_state), \
repr(['BUG', current_parse_case.path.decisions, current_parse_case.denotation, self.final_state, 'FINAL', coarse_path])
self.actual_paths.append(current_parse_case.path)
return
# Build LF for the current step
current_state, command, new_state = coarse_path[current_step]
if current_parse_case is not None:
assert (not isinstance(current_parse_case.denotation, Exception)
and current_parse_case.denotation.world_state == current_state), \
repr([current_parse_case.path.decisions, current_parse_case.denotation, current_state, command, coarse_path])
history = current_parse_case.denotation.command_history
else:
history = None
args = []
if command[0] == 'Pour':
args.append(list(self.get_object_refs(command[1], current_state, history)))
args.append(list(self.get_object_refs(command[2], current_state, history)))
args.append(list(self.get_action_refs(command[0], current_state, history)))
elif command[0] == 'Mix':
args.append(list(self.get_object_refs(command[1], current_state, history)))
args.append(list(self.get_action_refs(command[0], current_state, history)))
elif command[0] == 'Drain':
args.append(list(self.get_object_refs(command[1], current_state, history)))
args.append(list(self.get_amount_refs(command[2], current_state, history, command[1])))
args.append(list(self.get_action_refs(command[0], current_state, history)))
else:
raise ValueError('Unknown action: {}'.format(command[0]))
for combination in itertools.product(*args):
new_predicates = [y for arg in combination for y in arg]
self.find_actual_paths(coarse_path,
self.extend(current_parse_case, new_predicates),
current_step + 1)
    def get_object_refs(self, target_object, current_state, history):
        """Yield possible predicate sequences that refer to target_object."""
# Pure index
yield ['all-objects', str(target_object.position), 'index']
# Index from the back
if target_object.position == len(current_state):
yield ['all-objects', '-1', 'index']
# Color
if target_object.color is not None:
matched = current_state.apply_join(target_object.color, 'Color')
if len(matched) == 1:
yield [target_object.color, 'PColor']
else:
position = matched.index(target_object) + 1
yield [target_object.color, 'PColor', str(position), 'index']
if position == len(matched):
yield [target_object.color, 'PColor', '-1', 'index']
# History
if history:
for hist_id, hist in enumerate(history):
for arg_id, arg in enumerate(hist):
if (isinstance(arg, RLongAlchemyObject)
and arg.position == target_object.position):
yield [str(hist_id + 1), 'H{}'.format(arg_id)]
yield [str(hist_id - len(history)), 'H{}'.format(arg_id)]
    def get_amount_refs(self, amount, current_state, history, target_object):
        """Yield possible predicate sequences that refer to the given amount."""
# Pure number
yield [str(amount)]
# Relative number
if amount == target_object.amount:
yield ['X1/1']
# TODO: Other fractions
# History
if history:
for hist_id, hist in enumerate(history):
for arg_id, arg in enumerate(hist):
if (isinstance(arg, int) and arg == amount):
yield [str(hist_id + 1), 'H{}'.format(arg_id)]
yield [str(hist_id - len(history)), 'H{}'.format(arg_id)]
    def get_action_refs(self, action, current_state, history):
        """Yield possible predicate sequences that refer to the given action."""
yield ['A' + action]
# History
if history:
for hist_id, hist in enumerate(history):
if hist[0] == action:
yield [str(hist_id + 1), 'H0']
yield [str(hist_id - len(history)), 'H0']
def extend(self, current_parse_case, new_predicates):
"""Return a new ParseCase caused by extending current_parse_case
by the predicates in new_predicates.
Args:
current_parse_case (ParseCase or None)
new_predicates (list[RLongPredicate or str])
returns:
ParseCase
"""
for pred in new_predicates:
if not isinstance(pred, RLongPredicate):
pred = RLongPredicate(pred)
if current_parse_case is None:
current_parse_case = ParseCase.initial(self.context)
else:
current_parse_case = ParseCase.extend(current_parse_case)
current_parse_case.decision = pred
return current_parse_case
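# A minimal usage sketch (assuming an `ex` example object carrying .context
# and .answer as consumed above; `ex` itself is illustrative):
#
#     policy = AlchemyOracleExplorationPolicy()
#     for path in policy.infer_paths(ex):
#         print(path.decisions)  # one gold logical form per path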
################################
# End of file: ContextualSP/lemon/executor/strongsup/rlong/exploration_policy.py
################################
"""Data structures for the tables domain.
We represent denotations with various Python data structures.
Possible denotation types include:
- Unary = set of Things
| InfiniteSet
- ScopedBinary = dict {Object: Unary, ...} (the domain must be finite)
- Relation = string (Used when the relation is mentioned before the entity)
where
- Thing = string (NameValue)
| float (NumberValue)
| Date (DateValue)
- InfiniteSet = NeqInfiniteSet [e.g., (!= obama)]
| RangeInfiniteSet [e.g., (> 9000), (and (>= 4) (< 5))]
| GenericDateInfiniteSet [e.g., (date 2001 7 -1) = July 2001]
"""
import os
import re
import sys
from collections.abc import Container as ContainerABC
################################
# Thing
def parse_number(x):
"""Parse a number from a string."""
return round(float(x), 6)
class Date(object):
"""A date consisting of a year, a month, and a day.
    Some but not all fields can be absent (absent fields use the placeholder -1).
"""
def __init__(self, year, month, day):
if year == -1 and month == -1 and day == -1:
raise ValueError('Invalid date (-1 -1 -1)')
self.year = year
self.month = month
self.day = day
assert month == -1 or 1 <= month <= 12, 'Invalid month: {}'.format(month)
assert day == -1 or 1 <= day <= 31, 'Invalid day: {}'.format(day)
self._hash = hash((self.year, self.month, self.day))
def __str__(self):
return 'Date({}, {}, {})'.format(self.year, self.month, self.day)
__repr__ = __str__
def __eq__(self, other):
if not isinstance(other, Date):
return False
return (self.year == other.year
and self.month == other.month
and self.day == other.day)
def __hash__(self):
return self._hash
def __ne__(self, other):
return not (self == other)
    def _compare(self, other):
        """Three-way comparison; fields with placeholder -1 match anything."""
        if not isinstance(other, Date):
            raise ValueError('Cannot compare Date to {}'.format(type(other)))
        if self.year == other.year or self.year == -1 or other.year == -1:
            if self.month == other.month or self.month == -1 or other.month == -1:
                if self.day == other.day or self.day == -1 or other.day == -1:
                    return 0
                return (self.day > other.day) - (self.day < other.day)
            return (self.month > other.month) - (self.month < other.month)
        return (self.year > other.year) - (self.year < other.year)
    def __lt__(self, other):
        return self._compare(other) < 0
    def __le__(self, other):
        return self._compare(other) <= 0
    def __gt__(self, other):
        return self._compare(other) > 0
    def __ge__(self, other):
        return self._compare(other) >= 0
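# Note (illustrative): placeholder fields compare as equal to anything, so
#     Date(1776, 7, 4) <= Date(-1, 7, 4)   # True: the -1 year matches any year
# which is what lets generic dates like (date -1 7 -1) take part in comparisons.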
def parse_date(x):
"""Parse a date from a string with format yy-mm-dd."""
x = x.split('-')
assert len(x) == 3, 'Not a valid date: {}'.format(x)
year = -1 if x[0][0].lower() == 'x' else int(x[0])
month = -1 if x[1][0].lower() == 'x' else int(x[1])
day = -1 if x[2][0].lower() == 'x' else int(x[2])
return Date(year, month, day)
def parse_value(x):
"""Parse the string, which may be a number, a date, or a non-numeric string."""
try:
return parse_number(x)
    except Exception:
try:
return parse_date(x)
        except Exception:
return x
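# Illustrative examples of the parsers above:
#     parse_value('42')            -> 42.0               (NumberValue)
#     parse_value('1990-07-xx')    -> Date(1990, 7, -1)  (DateValue)
#     parse_value('fb:cell.obama') -> 'fb:cell.obama'    (NameValue string)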
################################
# Type processing
def get_type(x):
"""Return the type signature of x. Used to prevent comparison across types."""
if isinstance(x, float):
# NumberValue
return 'N'
elif isinstance(x, Date):
# DateValue
return 'D'
elif isinstance(x, str):
# NameValue: take the fb:xxx part of fb:xxx.yyy
if not x.startswith('fb:'):
raise ValueError('NameValue does not start with "fb:": {}'.format(x))
tokens = x.split('.')
if len(tokens) != 2:
raise ValueError('{} is not an entity'.format(x))
return tokens[0]
else:
raise ValueError('Unknown type for {}'.format(type(x)))
def ensure_same_type(collection, allowed_types=None):
"""Ensure that all values in the collection have the same type.
Return the agreed type. Throw an error if the type is not agreed.
Args:
collection: A set or a dict where values are sets.
allowed_types: Restriction on the agreed type.
Can be a string, a collection of strings, or None (= allow all).
Returns:
The agreed type
    Raises:
ValueError if one of the following happens:
- The collection is not a set or a set-valued dict
- The collection is empty
- Some two items have different types
- Some item does not agree with the allowed types (if specified)
"""
if isinstance(collection, set):
itr = iter(collection)
elif isinstance(collection, dict):
# Iterate over all items in values
itr = (x for v in collection.values() for x in v)
else:
raise ValueError('Bad data type: {}'.format(type(collection)))
if allowed_types and isinstance(allowed_types, str):
allowed_types = [allowed_types]
agreed_type = None
for value in itr:
if agreed_type is None:
agreed_type = get_type(value)
if allowed_types is not None and agreed_type not in allowed_types:
raise ValueError('Type {} is not in allowed types {}'\
.format(agreed_type, allowed_types))
else:
t = get_type(value)
if t != agreed_type:
raise ValueError('Value {} does not have agreed type {}'\
.format(value, agreed_type))
if agreed_type is None:
raise ValueError('The collection is empty: {}'.format(collection))
return agreed_type
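# For example (a sketch): ensure_same_type({1.0, 2.5}) returns 'N', while
# ensure_same_type({1.0, Date(2001, 7, -1)}) raises ValueError because a
# number ('N') and a date ('D') cannot share an agreed type.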
################################
# InfiniteSet
class InfiniteSet(ContainerABC):
"""An abstract class representing an infinite set of items."""
def __and__(self, stuff):
if isinstance(stuff, set):
return {x for x in stuff if x in self}
raise NotImplementedError
def __rand__(self, stuff):
return self & stuff
THINGS = (str, float, Date)
COMPARABLES = (float, Date)
COMPUTABLES = (float,)
UNARIES = (set, InfiniteSet)
BINARIES = (dict,)
class NeqInfiniteSet(InfiniteSet):
"""Represent (!= xxx).
Note that the semantics of (!= xxx) is
"things that are not xxx but have the same type as xxx"
"""
def __init__(self, value):
assert isinstance(value, THINGS), 'Invalid value for !=: {}'.format(value)
self.value = value
self.value_type = get_type(value)
def __eq__(self, other):
return (isinstance(other, NeqInfiniteSet)
and self.value == other.value)
def __hash__(self):
return 12345 + hash(self.value)
def __repr__(self):
return '{{ != {} }}'.format(self.value)
def __contains__(self, x):
# Need to type check
return x != self.value and get_type(x) == self.value_type
class RangeInfiniteSet(InfiniteSet):
"""Represent ranges like (> xxx) or (and (> xxx) (< yyy)).
xxx, yyy can be numbers or dates, but the types must agree.
"""
def __init__(self, sign, value, sign2=None, value2=None):
self.left_sign = self.left_value = None
self.right_sign = self.right_value = None
assert isinstance(value, COMPARABLES), \
'Invalid value for comparison: {}'.format(value)
self.value_type = get_type(value)
if sign in ('>', '>='):
self.left_sign = sign
self.left_value = value
elif sign in ('<', '<='):
self.right_sign = sign
self.right_value = value
else:
raise NotImplementedError(sign)
if sign2 is not None:
assert self.value_type == get_type(value2), \
'Invalid value for comparison: {}'.format(value2)
if sign2 in ('>', '>='):
assert self.left_sign is None
self.left_sign = sign2
self.left_value = value2
elif sign2 in ('<', '<='):
assert self.right_sign is None
self.right_sign = sign2
self.right_value = value2
else:
raise NotImplementedError(sign2)
def __eq__(self, other):
return (isinstance(other, RangeInfiniteSet)
and self.left_sign == other.left_sign
and self.right_sign == other.right_sign
and self.left_value == other.left_value
and self.right_value == other.right_value)
def __hash__(self):
return hash((self.left_sign, self.right_sign, self.left_value, self.right_value))
def __repr__(self):
if self.left_sign is None:
return '{{ {} {} }}'.format(self.right_sign, self.right_value)
        if self.right_sign is None:
return '{{ {} {} }}'.format(self.left_sign, self.left_value)
else:
return '{{ {} {} ; {} {} }}'.format(
self.left_sign, self.left_value,
self.right_sign, self.right_value)
def __contains__(self, x):
if self.value_type != get_type(x):
return False
if self.left_sign:
if ((self.left_sign == '>' and x <= self.left_value)
or (self.left_sign == '>=' and x < self.left_value)):
return False
if self.right_sign:
if ((self.right_sign == '<' and x >= self.right_value)
or (self.right_sign == '<=' and x > self.right_value)):
return False
return True
def __and__(self, stuff):
try:
return super(RangeInfiniteSet, self).__and__(stuff)
except NotImplementedError:
if isinstance(stuff, RangeInfiniteSet):
# ULTIMATE RANGE MERGE!!!
assert self.value_type == stuff.value_type,\
'Incompatible types: {} and {}'.format(self.value_type, stuff.value_type)
# Left
if (not self.left_sign
or (stuff.left_sign and (
stuff.left_value > self.left_value
or (stuff.left_value == self.left_value
and stuff.left_sign == '>')))):
new_left_sign = stuff.left_sign
new_left_value = stuff.left_value
else:
new_left_sign = self.left_sign
new_left_value = self.left_value
# Right
if (not self.right_sign
or (stuff.right_sign and (
stuff.right_value < self.right_value
or (stuff.right_value == self.right_value
and stuff.right_sign == '<')))):
new_right_sign = stuff.right_sign
new_right_value = stuff.right_value
else:
new_right_sign = self.right_sign
new_right_value = self.right_value
# Return value
if not new_left_sign:
if not new_right_sign:
return set()
return RangeInfiniteSet(new_right_sign, new_right_value)
elif not new_right_sign:
return RangeInfiniteSet(new_left_sign, new_left_value)
if new_left_value > new_right_value:
return set()
elif new_left_value == new_right_value:
if new_left_sign == '>' or new_right_sign == '<':
return set()
return {new_left_value}
                else:
                    return RangeInfiniteSet(new_left_sign, new_left_value,
                                            new_right_sign, new_right_value)
            # Unsupported operand type: re-raise instead of silently returning None
            raise NotImplementedError
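# A small worked example of the range merge above (illustrative):
#     r = RangeInfiniteSet('>', 3.0) & RangeInfiniteSet('>=', 1.0, '<', 5.0)
#     # r == RangeInfiniteSet('>', 3.0, '<', 5.0)
#     assert 4.0 in r and 3.0 not in r and 5.0 not in r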
class GenericDateInfiniteSet(InfiniteSet):
"""Represent a generic date where a year, month, or day is left unspecified.
For example, (-1 7 -1) represents the set of all dates with month = 7,
and (1990 7 -1) is the set of dates with year = 1990 and month = 7.
"""
def __init__(self, date):
self.year = date.year
self.month = date.month
self.day = date.day
def __eq__(self, other):
return (isinstance(other, GenericDateInfiniteSet)
and self.year == other.year
and self.month == other.month
and self.day == other.day)
def __hash__(self):
return hash((self.year, self.month, self.day))
def __repr__(self):
return '{{ {} {} {} }}'.format(self.year, self.month, self.day)
def __contains__(self, x):
if not isinstance(x, Date):
return False
return (self.year in (-1, x.year)
and self.month in (-1, x.month)
and self.day in (-1, x.day))
def min_(self):
# Note that the returned value might not be a valid date.
# The value is nevertheless ok for comparison.
if self.day != -1: # ....-..-07
return Date(self.year, self.month, self.day)
if self.month != -1: # ....-07-xx
return Date(self.year, self.month, 1)
if self.year != -1: # 1907-xx-xx
return Date(self.year, 1, 1)
def max_(self):
if self.day != -1: # ....-..-07
return Date(self.year, self.month, self.day)
if self.month != -1: # ....-07-xx
            if self.month == 2:
                # Leap year: divisible by 4 and not a century, or divisible by 400
                if self.year % 4 == 0 and (self.year % 100 != 0 or self.year % 400 == 0):
                    num_days = 29
                else:
                    num_days = 28
elif self.month in (1, 3, 5, 7, 8, 10, 12):
num_days = 31
else:
num_days = 30
return Date(self.year, self.month, num_days)
if self.year != -1: # 1907-xx-xx
return Date(self.year, 12, 31)
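# For instance (illustrative): GenericDateInfiniteSet(Date(-1, 7, -1)) contains
# every date whose month is July; min_() gives Date(-1, 7, 1) and max_() gives
# Date(-1, 7, 31), which can be used to bound the set for range comparisons.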
################################
# End of file: ContextualSP/lemon/executor/strongsup/tables/structure.py
################################
import copy
import math
import numpy as np
import pytest
from numpy.testing import assert_allclose
from strongsup.parse_case import ParseCase, ParsePath
from strongsup.predicate import Predicate
from strongsup.tests.utils import PredicateGenerator, softmax
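# (For reference, a minimal softmax consistent with its use below; the real
# helper lives in strongsup.tests.utils:)
#
#     def softmax(logits):
#         exps = np.exp(np.asarray(logits) - np.max(logits))
#         return exps / exps.sum()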
class ParseCaseTester(object):
def test_previous_decisions(self, case, previous_decisions):
assert case.previous_decisions == previous_decisions
def test_eq(self, case, equal_case, diff_case):
assert case == equal_case
assert case != diff_case
def test_no_dict(self, case):
with pytest.raises(AttributeError):
case.__dict__
def test_set_once(self, case, decision, logits):
p = PredicateGenerator(case.context)
c3 = ParseCase.extend(case, [p('a'), p('b')])
c3.decision = p('b')
c3.choice_logits = [1.0, 2.0]
assert c3.decision == p('b')
assert c3.choice_logits == [1.0, 2.0]
with pytest.raises(RuntimeError):
c3.decision = p('a')
with pytest.raises(RuntimeError):
c3.choice_logits = [3.0, 4.0]
def test_previous_cases(self, case, previous_cases):
for c1, c2 in zip(case._previous_cases, previous_cases):
assert c1 == c2
def test_path(self, case, path):
assert case.path == path
class BasicTestCase(object):
@pytest.fixture
def context(self):
return 'some context'
@pytest.fixture
def predicate_generator(self, context):
return PredicateGenerator(context)
@classmethod
def create_cases(cls, context):
p = PredicateGenerator(context)
c0 = ParseCase.initial(context, [p('a'), p('b'), p('c')])
c0.decision = p('b')
c0.choice_logits = [1., 2., 3.]
c0.choice_probs = softmax(c0.choice_logits)
c1 = ParseCase.extend(c0, [p('c'), p('d'), p('e')])
c1.decision = p('e')
c1.choice_logits = [1., 2., 3.]
c1.choice_probs = softmax(c1.choice_logits)
c2 = ParseCase.extend(c1, [p('f'), p('g')])
c2.decision = p('f')
c2.choice_logits = [5., 6.]
c2.choice_probs = softmax(c2.choice_logits)
return [c0, c1, c2]
class TestRecursiveParseCase(ParseCaseTester, BasicTestCase):
@pytest.fixture
def cases(self, context):
return self.create_cases(context)
@pytest.fixture
def previous_cases(self, cases):
return cases[:-1]
@pytest.fixture
def path(self, cases):
return ParsePath(cases)
@pytest.fixture
def case(self, cases):
return cases[-1] # last case
@pytest.fixture
def equal_case(self, context):
cases = self.create_cases(context)
return cases[-1] # just like case
@pytest.fixture
def diff_case(self, context):
cases = self.create_cases(context)
return cases[0]
@pytest.fixture
def previous_decisions(self, predicate_generator):
p = predicate_generator
return [p('b'), p('e')]
@pytest.fixture
def decision(self, predicate_generator):
p = predicate_generator
return p('f')
@pytest.fixture
def logits(self):
return [5.0, 6.0]
def test_previous_decided(self, case, predicate_generator):
p = predicate_generator
c1 = ParseCase.extend(case, [p('1'), p('2')])
with pytest.raises(RuntimeError):
# didn't set a decision on c1
ParseCase.extend(c1, [p('3'), p('4')])
def test_copy(self, case, predicate_generator):
p = predicate_generator
c = copy.copy(case)
assert c.decision == p('f')
assert c.choice_logits == [5., 6.]
assert c == case
class TestParsePath(BasicTestCase):
@pytest.fixture
def cases(self, context):
return TestRecursiveParseCase.create_cases(context)
@pytest.fixture
def case(self, cases):
return cases[-1]
def test_decisions(self, case, predicate_generator):
p = predicate_generator
assert case.path.decisions == [p('b'), p('e'), p('f')]
def test_prob(self, case):
e = math.exp
assert_allclose(case.prob, e(5) / (e(5) + e(6)))
path_prob = (
e(2) / (e(1) + e(2) + e(3))
* e(3) / (e(1) + e(2) + e(3))
* e(5) / (e(5) + e(6)))
assert_allclose(case.cumulative_prob, path_prob)
assert_allclose(case.path.prob, path_prob)
def test_prob_some_more(self):
empty_path = ParsePath([], context='hello')
assert empty_path.prob == 1.
################################
# End of file: ContextualSP/lemon/executor/strongsup/tests/test_parse_case.py
################################
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
from argparse import ArgumentParser
from fairseq_cli.train import cli_main as fairseq_train
from fairseq_cli.generate import cli_main as fairseq_generate
import logging
import shlex
import re
import os
sys.path.append('../')
# from model_interface import TAPEXModelInterface
from model_eval import evaluate_generate_file
from collections import Counter
logger = logging.getLogger(__name__)
def set_parser(parser):
parser.add_argument("--dataset-dir", type=str, required=True, default="",
help="dataset directory where train.src is located in")
parser.add_argument("--exp-dir", type=str, default="checkpoints",
help="experiment directory which stores the checkpoint weights")
parser.add_argument("--model-path", type=str, default="tapex.base/model.pt",
help="the directory of pre-trained model path")
parser.add_argument("--model-arch", type=str, default="bart_base", choices=["bart_large", "bart_base"],
help="tapex large should correspond to bart_large, and tapex base should be bart_base")
# train_parser.add_argument("--max-tokens", type=int, default=1536,
# help="if you train a large model on 16GB memory, max-tokens should be empirically "
# "set as 1536, and can be near-linearly increased according to your GPU memory.")
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--gradient-accumulation", type=int, default=8,
help="the accumulation steps to arrive a equal batch size, the default value can be used"
"to reproduce our results. And you can also reduce it to a proper value for you.")
parser.add_argument("--total-num-update", type=int, default=20000,
help="the total optimization training steps")
parser.add_argument("--learning-rate", type=float, default=3e-5,
help="the peak learning rate for model training")
parser.add_argument("--warmup-steps", type=int, default=1500,
help="warmup steps")
parser.add_argument("--seed", type=int, default=1,
help="random seed")
parser.add_argument("--wandb-project", type=str, default='universal_pretrain_bart',
help="wandb-project")
parser.add_argument("--label-smoothing", type=float, default=0.1,
help="label smoothing")
parser.add_argument("--sub-dir", type=str, default="valid", choices=["train", "valid", "test"],
help="the directory of pre-trained model path, and the default should be in"
"{bart.base, bart.large, tapex.base, tapex.large}.")
parser.add_argument("--predict-dir", type=str, default="predict",
help="the predict folder of generated result.")
def train_fairseq_model(args):
cmd = f"""
fairseq-train {args.dataset_dir} \
--save-dir {args.exp_dir} \
--restore-file {args.model_path} \
--arch {args.model_arch} \
--memory-efficient-fp16 \
--task translation \
--criterion label_smoothed_cross_entropy \
--source-lang src \
--target-lang tgt \
--truncate-source \
--label-smoothing {args.label_smoothing} \
--max-source-positions 1024 \
--batch-size {args.batch_size} \
--update-freq {args.gradient_accumulation} \
--max-update {args.total_num_update} \
--required-batch-size-multiple 1 \
--dropout 0.1 \
--attention-dropout 0.1 \
--relu-dropout 0.0 \
--weight-decay 0.01 \
--optimizer adam \
--adam-eps 1e-08 \
--clip-norm 0.1 \
--lr-scheduler polynomial_decay \
--lr {args.learning_rate} \
--total-num-update {args.total_num_update} \
--warmup-updates {args.warmup_steps} \
--seed {args.seed} \
--ddp-backend no_c10d \
--num-workers 20 \
--reset-meters \
--reset-optimizer \
--reset-dataloader \
--share-all-embeddings \
--layernorm-embedding \
--share-decoder-input-output-embed \
--skip-invalid-size-inputs-valid-test \
--log-format json \
--log-interval 10 \
--save-interval-updates 500 \
--validate-interval 50 \
--save-interval 50 \
--patience 200 \
--report-accuracy \
--wandb-project {args.wandb_project}
"""
sys.argv = shlex.split(cmd)
logger.info("Begin to train model for dataset {}".format(args.dataset_dir))
logger.info("Running command {}".format(re.sub("\s+", " ", cmd.replace("\n", " "))))
fairseq_train()
def evaluate_fairseq_model(args):
cmd = f"""
    fairseq-generate \
--path {args.model_path} \
{args.dataset_dir} \
--truncate-source \
--gen-subset {args.sub_dir} \
--batch-size {args.batch_size} \
--nbest 1 \
--source-lang src \
--target-lang tgt \
--results-path {args.predict_dir} \
--beam 5 \
--bpe gpt2 \
--remove-bpe \
--num-workers 20 \
--skip-invalid-size-inputs-valid-test
"""
sys.argv = shlex.split(cmd)
logger.info("Begin to evaluate model on the {} subset of dataset {}".format(args.sub_dir, args.dataset_dir))
logger.info("Running command {}".format(re.sub("\s+", " ", cmd.replace("\n", " "))))
fairseq_generate()
# after generation, we should call TAPEX evaluate function to evaluate the result
generate_file = os.path.join(args.predict_dir, "generate-{}.txt".format(args.sub_dir))
# the delimiter is the answer delimiter used in training, which by default is a comma
evaluate_generate_file(generate_file, target_delimiter=", ")
def eval_all_checkpoints(args):
for args.sub_dir in ['valid', 'test']:
all_checkpoint_name_list = [item for item in list(os.listdir(args.exp_dir)) if item.endswith('.pt')]
print(all_checkpoint_name_list)
print('{} checkpoints needs to evaluate'.format(len(all_checkpoint_name_list)))
for model_name in all_checkpoint_name_list:
args.model_path = os.path.join(args.exp_dir, model_name)
args.predict_dir = args.model_path[:-3]
evaluate_fairseq_model(args)
def post_eval(eval_file, data_file):
    """Compute per-bucket accuracy; example ids cycle through utterance counts mod 5."""
    eval_lines = open(eval_file, 'r').readlines()[1:]
data_lines = open(data_file, 'r').readlines()
result_1utts_list = []
result_2utts_list = []
result_3utts_list = []
result_4utts_list = []
result_5utts_list = []
for line in eval_lines:
# print(line)
result, _, _, source, id = line.strip().split('\t')
assert source.strip() == data_lines[int(id)].strip()
if int(id) % 5 == 0:
result_1utts_list.append(result)
elif int(id) % 5 == 1:
result_2utts_list.append(result)
elif int(id) % 5 == 2:
result_3utts_list.append(result)
elif int(id) % 5 == 3:
result_4utts_list.append(result)
elif int(id) % 5 == 4:
result_5utts_list.append(result)
result_1utts = Counter(result_1utts_list)
result_2utts = Counter(result_2utts_list)
result_3utts = Counter(result_3utts_list)
result_4utts = Counter(result_4utts_list)
result_5utts = Counter(result_5utts_list)
result_1utts = result_1utts['True'] / sum(result_1utts.values())
result_3utts = result_3utts['True'] / sum(result_3utts.values())
result_5utts = result_5utts['True'] / sum(result_5utts.values())
return round(result_1utts,3), round(result_3utts,3), round(result_5utts,3)
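# A hedged arithmetic check of the accuracy computed above:
#     bucket = Counter(['True', 'True', 'False'])
#     bucket['True'] / sum(bucket.values())   # -> 0.6666..., rounded to 0.667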
def post_eval_with_generated_file(args):
result_1utts_dict = {}
result_3utts_dict = {}
result_5utts_dict = {}
all_checkpoint_name_list = [item for item in list(os.listdir(args.exp_dir)) if item.endswith('.pt')]
print('{} checkpoints needs to evaluate'.format(len(all_checkpoint_name_list)))
for model_name in all_checkpoint_name_list:
model_path = os.path.join(args.exp_dir, model_name)
predict_dir = model_path[:-3]
eval_file = os.path.join(predict_dir, 'generate-valid.txt.eval')
data_file = os.path.join(args.dataset_dir, '../dev.src')
result_1utts, result_3utts, result_5utts = post_eval(eval_file, data_file)
print("path: {}, stage: {}, 1utts: {}, 3utts: {}, 5utts: {}".format(model_path, 'valid', result_1utts, result_3utts, result_5utts))
result_1utts_dict[model_path] = result_1utts
result_3utts_dict[model_path] = result_3utts
result_5utts_dict[model_path] = result_5utts
eval_file_test = os.path.join(predict_dir, 'generate-test.txt.eval')
data_file_test = os.path.join(args.dataset_dir, '../test.src')
result_1utts_test, result_3utts_test, result_5utts_test = post_eval(eval_file_test, data_file_test)
print("path: {}, stage: {}, 1utts: {}, 3utts: {}, 5utts: {}".format(model_path, 'test', result_1utts_test, result_3utts_test, result_5utts_test))
print('~~~')
best_key = max(result_5utts_dict, key=result_5utts_dict.get)
print(best_key)
print(result_1utts_dict[best_key])
print(result_3utts_dict[best_key])
print(result_5utts_dict[best_key])
print('**************************************************************')
if __name__ == '__main__':
parser = ArgumentParser()
set_parser(parser)
args = parser.parse_args()
train_fairseq_model(args)
eval_all_checkpoints(args)
post_eval_with_generated_file(args)
################################
# End of file: ContextualSP/lemon/lemon/run_model_finetune.py
################################
{"id":"Mercury_417466","answerKey":"A"}
{"id":"Mercury_7081673","answerKey":"B"}
{"id":"Mercury_7239733","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_4_8","answerKey":"D"}
{"id":"Mercury_7037258","answerKey":"B"}
{"id":"CSZ20679","answerKey":"C"}
{"id":"Mercury_182158","answerKey":"A"}
{"id":"Mercury_7216668","answerKey":"C"}
{"id":"MCAS_2001_5_19","answerKey":"C"}
{"id":"Mercury_SC_413631","answerKey":"A"}
{"id":"MCAS_2005_5_10","answerKey":"B"}
{"id":"Mercury_7166145","answerKey":"B"}
{"id":"MSA_2013_5_6","answerKey":"B"}
{"id":"Mercury_SC_405199","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_8_28","answerKey":"2"}
{"id":"Mercury_SC_401777","answerKey":"B"}
{"id":"Mercury_7198188","answerKey":"B"}
{"id":"MCAS_1998_4_11","answerKey":"D"}
{"id":"Mercury_SC_LBS10784","answerKey":"B"}
{"id":"Mercury_7033548","answerKey":"D"}
{"id":"Mercury_7146195","answerKey":"D"}
{"id":"NCEOGA_2013_8_36","answerKey":"D"}
{"id":"Mercury_SC_415412","answerKey":"D"}
{"id":"Mercury_7126840","answerKey":"C"}
{"id":"Mercury_SC_408362","answerKey":"B"}
{"id":"MDSA_2008_8_35","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_8_21","answerKey":"2"}
{"id":"VASoL_2008_5_12","answerKey":"B"}
{"id":"Mercury_SC_LBS10680","answerKey":"A"}
{"id":"MCAS_2003_5_3","answerKey":"B"}
{"id":"VASoL_2010_5_18","answerKey":"A"}
{"id":"Mercury_7074900","answerKey":"C"}
{"id":"Mercury_SC_408762","answerKey":"A"}
{"id":"ACTAAP_2015_5_8","answerKey":"A"}
{"id":"Mercury_7263008","answerKey":"C"}
{"id":"MCAS_2000_4_29","answerKey":"A"}
{"id":"CSZ_2008_5_CSZ10233","answerKey":"D"}
{"id":"Mercury_7128660","answerKey":"C"}
{"id":"Mercury_7100520","answerKey":"C"}
{"id":"Mercury_7017728","answerKey":"B"}
{"id":"Mercury_7032865","answerKey":"A"}
{"id":"Mercury_405057","answerKey":"B"}
{"id":"Mercury_7057593","answerKey":"A"}
{"id":"Mercury_SC_405231","answerKey":"C"}
{"id":"VASoL_2007_5_38","answerKey":"A"}
{"id":"Mercury_SC_410895","answerKey":"D"}
{"id":"Mercury_7015978","answerKey":"D"}
{"id":"ACTAAP_2010_5_5","answerKey":"C"}
{"id":"Mercury_7080955","answerKey":"D"}
{"id":"ACTAAP_2009_5_4","answerKey":"D"}
{"id":"Mercury_7017010","answerKey":"A"}
{"id":"Mercury_7146108","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_4_12","answerKey":"C"}
{"id":"VASoL_2009_5_3","answerKey":"A"}
{"id":"Mercury_SC_401253","answerKey":"C"}
{"id":"Mercury_7235953","answerKey":"B"}
{"id":"Mercury_403974","answerKey":"D"}
{"id":"Mercury_7241063","answerKey":"C"}
{"id":"MCAS_2010_8_12015","answerKey":"B"}
{"id":"Mercury_7168350","answerKey":"A"}
{"id":"Mercury_SC_402980","answerKey":"B"}
{"id":"Mercury_417126","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_8_2","answerKey":"4"}
{"id":"NYSEDREGENTS_2010_8_13","answerKey":"2"}
{"id":"Mercury_177223","answerKey":"A"}
{"id":"Mercury_7200533","answerKey":"A"}
{"id":"Mercury_SC_400707","answerKey":"B"}
{"id":"Mercury_SC_400380","answerKey":"B"}
{"id":"Mercury_7205590","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_4_30","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_4_17","answerKey":"A"}
{"id":"VASoL_2008_3_5","answerKey":"D"}
{"id":"Mercury_SC_415390","answerKey":"D"}
{"id":"Mercury_SC_408436","answerKey":"B"}
{"id":"Mercury_SC_401143","answerKey":"C"}
{"id":"LEAP_2005_8_10403","answerKey":"D"}
{"id":"Mercury_7108045","answerKey":"B"}
{"id":"Mercury_SC_402251","answerKey":"B"}
{"id":"Mercury_7071418","answerKey":"B"}
{"id":"Mercury_SC_407370","answerKey":"A"}
{"id":"Mercury_7084595","answerKey":"D"}
{"id":"Mercury_7041335","answerKey":"A"}
{"id":"Mercury_7236320","answerKey":"C"}
{"id":"LEAP__5_10308","answerKey":"A"}
{"id":"Mercury_7068425","answerKey":"B"}
{"id":"Mercury_SC_401308","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_4_18","answerKey":"A"}
{"id":"Mercury_7165550","answerKey":"A"}
{"id":"Mercury_7114923","answerKey":"C"}
{"id":"Mercury_SC_408434","answerKey":"D"}
{"id":"Mercury_7008453","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_14","answerKey":"3"}
{"id":"NYSEDREGENTS_2015_8_24","answerKey":"3"}
{"id":"NYSEDREGENTS_2008_4_6","answerKey":"B"}
{"id":"Mercury_400279","answerKey":"B"}
{"id":"Mercury_7092190","answerKey":"A"}
{"id":"Mercury_7113890","answerKey":"A"}
{"id":"TAKS_2009_8_17","answerKey":"C"}
{"id":"AKDE&ED_2008_4_3","answerKey":"B"}
{"id":"Mercury_7202353","answerKey":"C"}
{"id":"MCAS_2006_9_14","answerKey":"B"}
{"id":"MSA_2015_5_37","answerKey":"D"}
{"id":"MDSA_2010_8_22","answerKey":"B"}
{"id":"MCAS_1999_4_4","answerKey":"C"}
{"id":"Mercury_7247923","answerKey":"D"}
{"id":"Mercury_SC_416124","answerKey":"C"}
{"id":"Mercury_407530","answerKey":"B"}
{"id":"Mercury_7271740","answerKey":"C"}
{"id":"Mercury_7026495","answerKey":"C"}
{"id":"Mercury_7084315","answerKey":"C"}
{"id":"TIMSS_2011_4_pg51","answerKey":"B"}
{"id":"Mercury_7081148","answerKey":"B"}
{"id":"Mercury_SC_400040","answerKey":"B"}
{"id":"ACTAAP_2010_7_10","answerKey":"C"}
{"id":"ACTAAP_2013_7_4","answerKey":"C"}
{"id":"Mercury_SC_405444","answerKey":"B"}
{"id":"Mercury_7213675","answerKey":"B"}
{"id":"MEA_2010_8_15-v1","answerKey":"B"}
{"id":"Mercury_7094675","answerKey":"A"}
{"id":"Mercury_403912","answerKey":"A"}
{"id":"Mercury_7009853","answerKey":"A"}
{"id":"Mercury_SC_LBS10020","answerKey":"C"}
{"id":"MEA_2013_8_2","answerKey":"B"}
{"id":"Mercury_7068688","answerKey":"C"}
{"id":"Mercury_7271705","answerKey":"C"}
{"id":"Mercury_402456","answerKey":"C"}
{"id":"NCEOGA_2013_8_6","answerKey":"A"}
{"id":"Mercury_7230178","answerKey":"C"}
{"id":"Mercury_7215233","answerKey":"C"}
{"id":"Mercury_SC_415535","answerKey":"B"}
{"id":"CSZ_2008_8_29","answerKey":"C"}
{"id":"MSA_2012_5_28","answerKey":"A"}
{"id":"MCAS_2010_8_12008","answerKey":"B"}
{"id":"Mercury_SC_406724","answerKey":"A"}
{"id":"TIMSS_2007_8_pg33","answerKey":"B"}
{"id":"Mercury_7004988","answerKey":"D"}
{"id":"Mercury_SC_LBS10946","answerKey":"A"}
{"id":"TAKS_2009_8_39","answerKey":"B"}
{"id":"Mercury_SC_408357","answerKey":"D"}
{"id":"Mercury_178710","answerKey":"C"}
{"id":"Mercury_410598","answerKey":"B"}
{"id":"TIMSS_2011_4_pg15","answerKey":"D"}
{"id":"Mercury_7217088","answerKey":"B"}
{"id":"Mercury_7143360","answerKey":"D"}
{"id":"MCAS_2012_8_23645","answerKey":"A"}
{"id":"LEAP__4_10228","answerKey":"D"}
{"id":"MSA_2012_8_29","answerKey":"A"}
{"id":"Mercury_SC_401774","answerKey":"B"}
{"id":"TIMSS_2003_4_pg14","answerKey":"B"}
{"id":"ACTAAP_2008_7_13","answerKey":"A"}
{"id":"Mercury_SC_416096","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_8_12","answerKey":"1"}
{"id":"Mercury_7222845","answerKey":"D"}
{"id":"Mercury_SC_401166","answerKey":"A"}
{"id":"LEAP_2005_4_10267","answerKey":"D"}
{"id":"ACTAAP_2007_7_18","answerKey":"D"}
{"id":"Mercury_7188370","answerKey":"D"}
{"id":"MSA_2013_5_5","answerKey":"A"}
{"id":"MCAS_2005_5_8","answerKey":"D"}
{"id":"Mercury_184888","answerKey":"B"}
{"id":"Mercury_7044188","answerKey":"D"}
{"id":"Mercury_7056875","answerKey":"B"}
{"id":"Mercury_SC_400868","answerKey":"C"}
{"id":"Mercury_SC_409142","answerKey":"D"}
{"id":"Mercury_SC_408900","answerKey":"C"}
{"id":"Mercury_SC_401624","answerKey":"A"}
{"id":"Mercury_7188965","answerKey":"B"}
{"id":"NCEOGA_2013_8_32","answerKey":"A"}
{"id":"Mercury_7008208","answerKey":"A"}
{"id":"Mercury_7092400","answerKey":"B"}
{"id":"Mercury_417462","answerKey":"C"}
{"id":"Mercury_SC_401211","answerKey":"B"}
{"id":"Mercury_178605","answerKey":"D"}
{"id":"Mercury_SC_400364","answerKey":"C"}
{"id":"NYSEDREGENTS_2008_8_15","answerKey":"3"}
{"id":"ACTAAP_2015_5_7","answerKey":"C"}
{"id":"Mercury_7180618","answerKey":"A"}
{"id":"Mercury_415262","answerKey":"B"}
{"id":"Mercury_7200200","answerKey":"C"}
{"id":"MDSA_2008_8_24","answerKey":"C"}
{"id":"Mercury_185238","answerKey":"B"}
{"id":"MCAS_2015_8_6","answerKey":"B"}
{"id":"Mercury_7015663","answerKey":"A"}
{"id":"Mercury_SC_LBS10174","answerKey":"A"}
{"id":"Mercury_7024780","answerKey":"C"}
{"id":"Mercury_SC_404975","answerKey":"D"}
{"id":"Mercury_7098928","answerKey":"B"}
{"id":"Mercury_416501","answerKey":"C"}
{"id":"Mercury_SC_406012","answerKey":"B"}
{"id":"Mercury_SC_407192","answerKey":"C"}
{"id":"Mercury_7239890","answerKey":"A"}
{"id":"Mercury_7282135","answerKey":"C"}
{"id":"Mercury_184328","answerKey":"A"}
{"id":"ACTAAP_2007_7_22","answerKey":"A"}
{"id":"MDSA_2010_8_43","answerKey":"B"}
{"id":"Mercury_7044520","answerKey":"B"}
{"id":"Mercury_SC_400518","answerKey":"C"}
{"id":"Mercury_7205573","answerKey":"C"}
{"id":"Mercury_7222863","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_4_19","answerKey":"B"}
{"id":"Mercury_7121905","answerKey":"C"}
{"id":"LEAP__4_10226","answerKey":"C"}
{"id":"Mercury_7210018","answerKey":"C"}
{"id":"ACTAAP_2013_7_13","answerKey":"A"}
{"id":"MCAS_2014_8_11","answerKey":"B"}
{"id":"OHAT_2007_5_12","answerKey":"B"}
{"id":"MCAS_2000_4_24","answerKey":"C"}
{"id":"MCAS_1999_8_31","answerKey":"D"}
{"id":"Mercury_7212853","answerKey":"C"}
{"id":"Mercury_400808","answerKey":"B"}
{"id":"Mercury_404895","answerKey":"B"}
{"id":"MCAS_2000_8_19","answerKey":"C"}
{"id":"Mercury_415684","answerKey":"C"}
{"id":"Mercury_7068705","answerKey":"A"}
{"id":"MCAS_2011_5_17668","answerKey":"A"}
{"id":"Mercury_402332","answerKey":"C"}
{"id":"Mercury_SC_400601","answerKey":"A"}
{"id":"VASoL_2009_5_27","answerKey":"C"}
{"id":"Mercury_7180705","answerKey":"B"}
{"id":"Mercury_SC_406153","answerKey":"B"}
{"id":"Mercury_SC_408919","answerKey":"D"}
{"id":"Mercury_7024745","answerKey":"D"}
{"id":"MDSA_2008_5_6","answerKey":"C"}
{"id":"Mercury_7090930","answerKey":"D"}
{"id":"Mercury_7033373","answerKey":"D"}
{"id":"Mercury_SC_405219","answerKey":"D"}
{"id":"Mercury_SC_402238","answerKey":"A"}
{"id":"Mercury_7215863","answerKey":"B"}
{"id":"Mercury_7041300","answerKey":"D"}
{"id":"Mercury_7190050","answerKey":"B"}
{"id":"MCAS_1999_4_10","answerKey":"A"}
{"id":"Mercury_7222320","answerKey":"D"}
{"id":"MCAS_2006_8_25","answerKey":"B"}
{"id":"Mercury_7206605","answerKey":"B"}
{"id":"Mercury_SC_408336","answerKey":"B"}
{"id":"Mercury_416579","answerKey":"A"}
{"id":"Mercury_7001575","answerKey":"D"}
{"id":"Mercury_SC_416142","answerKey":"D"}
{"id":"MCAS_1998_8_19","answerKey":"A"}
{"id":"Mercury_SC_400187","answerKey":"D"}
{"id":"TIMSS_2011_8_pg74","answerKey":"B"}
{"id":"Mercury_7013458","answerKey":"D"}
{"id":"CSZ30768","answerKey":"D"}
{"id":"MCAS_2004_9_10","answerKey":"A"}
{"id":"AIMS_2009_4_25","answerKey":"C"}
{"id":"Mercury_7221498","answerKey":"D"}
{"id":"Mercury_7170853","answerKey":"B"}
{"id":"Mercury_7109690","answerKey":"B"}
{"id":"Mercury_SC_400525","answerKey":"A"}
{"id":"Mercury_SC_LBS10616","answerKey":"A"}
{"id":"MCAS_2000_4_21","answerKey":"C"}
{"id":"MCAS_1999_8_9","answerKey":"D"}
{"id":"Mercury_415265","answerKey":"B"}
{"id":"AKDE&ED_2008_8_20","answerKey":"C"}
{"id":"Mercury_415545","answerKey":"D"}
{"id":"AIMS_2008_4_6","answerKey":"A"}
{"id":"TIMSS_1995_8_L7","answerKey":"D"}
{"id":"Mercury_SC_401161","answerKey":"D"}
{"id":"Mercury_7086765","answerKey":"B"}
{"id":"Mercury_183768","answerKey":"B"}
{"id":"TAKS_2009_5_6","answerKey":"A"}
{"id":"Mercury_7222775","answerKey":"C"}
{"id":"VASoL_2008_5_20","answerKey":"B"}
{"id":"MEA_2010_8_9-v1","answerKey":"D"}
{"id":"Mercury_7268030","answerKey":"C"}
{"id":"NYSEDREGENTS_2012_8_5","answerKey":"4"}
{"id":"Mercury_7239138","answerKey":"A"}
{"id":"Mercury_SC_400662","answerKey":"C"}
{"id":"Mercury_414097","answerKey":"C"}
{"id":"Mercury_7083598","answerKey":"B"}
{"id":"MSA_2012_8_36","answerKey":"C"}
{"id":"VASoL_2011_5_36","answerKey":"D"}
{"id":"VASoL_2010_3_13","answerKey":"A"}
{"id":"Mercury_405141","answerKey":"B"}
{"id":"Mercury_7026968","answerKey":"A"}
{"id":"Mercury_7234395","answerKey":"B"}
{"id":"Mercury_402535","answerKey":"B"}
{"id":"MDSA_2011_8_2","answerKey":"B"}
{"id":"Mercury_7086783","answerKey":"C"}
{"id":"Mercury_SC_405164","answerKey":"C"}
{"id":"Mercury_7018340","answerKey":"A"}
{"id":"Mercury_7240923","answerKey":"A"}
{"id":"Mercury_SC_401122","answerKey":"C"}
{"id":"Mercury_7080465","answerKey":"A"}
{"id":"ACTAAP_2014_7_4","answerKey":"A"}
{"id":"Mercury_7267540","answerKey":"D"}
{"id":"Mercury_SC_406684","answerKey":"D"}
{"id":"Mercury_SC_401771","answerKey":"D"}
{"id":"Mercury_7228690","answerKey":"A"}
{"id":"Mercury_7216720","answerKey":"C"}
{"id":"Mercury_7057785","answerKey":"B"}
{"id":"Mercury_7236495","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_8_9","answerKey":"3"}
{"id":"MCAS_2000_8_24","answerKey":"D"}
{"id":"Mercury_SC_400298","answerKey":"A"}
{"id":"Mercury_7026425","answerKey":"C"}
{"id":"MCAS_2006_9_13-v1","answerKey":"A"}
{"id":"Mercury_SC_402084","answerKey":"A"}
{"id":"Mercury_405595","answerKey":"B"}
{"id":"MDSA_2013_8_2","answerKey":"C"}
{"id":"Mercury_7213868","answerKey":"C"}
{"id":"Mercury_416375","answerKey":"A"}
{"id":"VASoL_2011_5_34","answerKey":"B"}
{"id":"Mercury_411731","answerKey":"B"}
{"id":"TAKS_2009_8_24","answerKey":"C"}
{"id":"Mercury_7018060","answerKey":"B"}
{"id":"Mercury_SC_402044","answerKey":"C"}
{"id":"Mercury_178938","answerKey":"B"}
{"id":"Mercury_SC_416166","answerKey":"C"}
{"id":"FCAT_2008_5_7","answerKey":"D"}
{"id":"ACTAAP_2013_7_15","answerKey":"B"}
{"id":"MCAS_8_2015_14","answerKey":"D"}
{"id":"MCAS_2004_5_32","answerKey":"A"}
{"id":"Mercury_SC_401214","answerKey":"D"}
{"id":"Mercury_7074953","answerKey":"B"}
{"id":"MCAS_2008_5_5632","answerKey":"B"}
{"id":"Mercury_400556","answerKey":"D"}
{"id":"NAEP_2009_4_S11+1","answerKey":"B"}
{"id":"CSZ30263","answerKey":"B"}
{"id":"MEA_2014_8_2","answerKey":"D"}
{"id":"Mercury_179200","answerKey":"A"}
{"id":"Mercury_SC_407706","answerKey":"B"}
{"id":"Mercury_7090755","answerKey":"A"}
{"id":"Mercury_406811","answerKey":"D"}
{"id":"Mercury_7082670","answerKey":"C"}
{"id":"Mercury_7124320","answerKey":"B"}
{"id":"Mercury_SC_409251","answerKey":"C"}
{"id":"MEA_2014_5_15","answerKey":"B"}
{"id":"WASL_2003_5_8","answerKey":"A"}
{"id":"Mercury_7106610","answerKey":"A"}
{"id":"MCAS_2010_8_12012","answerKey":"D"}
{"id":"MCAS_2010_8_12019","answerKey":"B"}
{"id":"Mercury_7159425","answerKey":"A"}
{"id":"Mercury_192343","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_8_9","answerKey":"4"}
{"id":"Mercury_SC_405783","answerKey":"C"}
{"id":"ACTAAP_2013_7_1","answerKey":"C"}
{"id":"MDSA_2008_8_13","answerKey":"C"}
{"id":"MDSA_2011_5_35","answerKey":"C"}
{"id":"MCAS_2005_8_17","answerKey":"D"}
{"id":"VASoL_2009_5_24","answerKey":"B"}
{"id":"LEAP_2008_4_10287","answerKey":"B"}
{"id":"Mercury_192868","answerKey":"C"}
{"id":"MCAS_2004_8_22","answerKey":"A"}
{"id":"Mercury_7013195","answerKey":"B"}
{"id":"MCAS_2010_5_14","answerKey":"C"}
{"id":"Mercury_7220343","answerKey":"A"}
{"id":"TIMSS_1995_8_K16","answerKey":"A"}
{"id":"Mercury_SC_408031","answerKey":"C"}
{"id":"Mercury_7015925","answerKey":"B"}
{"id":"Mercury_7085873","answerKey":"A"}
{"id":"Mercury_7217035","answerKey":"C"}
{"id":"Mercury_7003728","answerKey":"D"}
{"id":"TIMSS_2003_4_pg10","answerKey":"A"}
{"id":"Mercury_SC_401120","answerKey":"C"}
{"id":"Mercury_407132","answerKey":"A"}
{"id":"Mercury_7074988","answerKey":"A"}
{"id":"Mercury_7227798","answerKey":"C"}
{"id":"Mercury_SC_401827","answerKey":"D"}
{"id":"Mercury_SC_401403","answerKey":"A"}
{"id":"MDSA_2010_5_19","answerKey":"B"}
{"id":"MEA_2014_5_12","answerKey":"B"}
{"id":"MSA_2012_5_22","answerKey":"B"}
{"id":"Mercury_SC_407577","answerKey":"D"}
{"id":"Mercury_401760","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_8_33","answerKey":"4"}
{"id":"TIMSS_2007_8_pg34","answerKey":"C"}
{"id":"Mercury_7090633","answerKey":"A"}
{"id":"Mercury_404924","answerKey":"D"}
{"id":"Mercury_7218488","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_8_12","answerKey":"1"}
{"id":"MDSA_2011_4_16","answerKey":"D"}
{"id":"VASoL_2008_3_33","answerKey":"C"}
{"id":"Mercury_7247853","answerKey":"A"}
{"id":"Mercury_7094553","answerKey":"D"}
{"id":"Mercury_SC_415492","answerKey":"B"}
{"id":"Mercury_7135853","answerKey":"B"}
{"id":"Mercury_400363","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_8_10","answerKey":"3"}
{"id":"Mercury_SC_415584","answerKey":"B"}
{"id":"TIMSS_2003_4_pg12","answerKey":"D"}
{"id":"AIMS_2008_4_18","answerKey":"A"}
{"id":"Mercury_7246960","answerKey":"D"}
{"id":"Mercury_7217350","answerKey":"C"}
{"id":"Mercury_SC_402052","answerKey":"A"}
{"id":"MCAS_2003_8_5","answerKey":"A"}
{"id":"Mercury_7015733","answerKey":"C"}
{"id":"ACTAAP_2014_5_12","answerKey":"C"}
{"id":"Mercury_7214428","answerKey":"D"}
{"id":"Mercury_405465","answerKey":"D"}
{"id":"Mercury_7101518","answerKey":"D"}
{"id":"NAEP_2005_8_S11+11","answerKey":"B"}
{"id":"Mercury_7005058","answerKey":"D"}
{"id":"Mercury_7058503","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_8_2","answerKey":"2"}
{"id":"Mercury_407259","answerKey":"B"}
{"id":"Mercury_7017798","answerKey":"C"}
{"id":"Mercury_7124128","answerKey":"D"}
{"id":"Mercury_SC_402258","answerKey":"C"}
{"id":"TIMSS_2011_4_pg97","answerKey":"B"}
{"id":"Mercury_7162558","answerKey":"A"}
{"id":"Mercury_416414","answerKey":"D"}
{"id":"Mercury_SC_405081","answerKey":"D"}
{"id":"VASoL_2008_3_20","answerKey":"A"}
{"id":"Mercury_177345","answerKey":"D"}
{"id":"Mercury_SC_408033","answerKey":"D"}
{"id":"TIMSS_1995_8_I10","answerKey":"C"}
{"id":"Mercury_415085","answerKey":"C"}
{"id":"Mercury_416550","answerKey":"B"}
{"id":"Mercury_7137043","answerKey":"C"}
{"id":"Mercury_7216633","answerKey":"B"}
{"id":"Mercury_SC_408620","answerKey":"C"}
{"id":"ACTAAP_2009_7_5","answerKey":"C"}
{"id":"Mercury_SC_406674","answerKey":"A"}
{"id":"Mercury_7099348","answerKey":"C"}
{"id":"Mercury_403937","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_8_26","answerKey":"4"}
{"id":"MCAS_8_2015_17","answerKey":"C"}
{"id":"Mercury_7228393","answerKey":"D"}
{"id":"Mercury_415745","answerKey":"C"}
{"id":"Mercury_SC_415338","answerKey":"B"}
{"id":"Mercury_7108238","answerKey":"A"}
{"id":"Mercury_177485","answerKey":"B"}
{"id":"Mercury_SC_405120","answerKey":"C"}
{"id":"MDSA_2009_8_25","answerKey":"D"}
{"id":"VASoL_2009_3_26","answerKey":"C"}
{"id":"Mercury_7264180","answerKey":"B"}
{"id":"Mercury_7004183","answerKey":"B"}
{"id":"Mercury_SC_LBS10026","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_8_20","answerKey":"1"}
{"id":"Mercury_407359","answerKey":"D"}
{"id":"AKDE&ED_2012_4_39","answerKey":"D"}
{"id":"Mercury_7034790","answerKey":"B"}
{"id":"Mercury_7194268","answerKey":"C"}
{"id":"VASoL_2008_3_21","answerKey":"C"}
{"id":"MCAS_2010_5_11981","answerKey":"B"}
{"id":"Mercury_SC_402241","answerKey":"D"}
{"id":"NYSEDREGENTS_2013_4_23","answerKey":"D"}
{"id":"CSZ40385","answerKey":"B"}
{"id":"MDSA_2007_8_4","answerKey":"A"}
{"id":"Mercury_405469","answerKey":"D"}
{"id":"Mercury_SC_402036","answerKey":"B"}
{"id":"Mercury_7230423","answerKey":"A"}
{"id":"MCAS_2012_8_23649","answerKey":"D"}
{"id":"Mercury_SC_416135","answerKey":"A"}
{"id":"MEAP_2005_8_33","answerKey":"A"}
{"id":"Mercury_415686","answerKey":"D"}
{"id":"NCEOGA_2013_5_49","answerKey":"D"}
{"id":"Mercury_7027265","answerKey":"D"}
{"id":"MEAP_2004_8_48","answerKey":"C"}
{"id":"MCAS_2001_8_9","answerKey":"C"}
{"id":"MCAS_2004_9_20","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_8_27","answerKey":"3"}
{"id":"Mercury_SC_400871","answerKey":"D"}
{"id":"Mercury_SC_408250","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_8_36","answerKey":"4"}
{"id":"Mercury_LBS10523","answerKey":"C"}
{"id":"Mercury_7044083","answerKey":"D"}
{"id":"Mercury_402144","answerKey":"D"}
{"id":"NCEOGA_2013_8_49","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_4_12","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_4_20","answerKey":"D"}
{"id":"Mercury_7068600","answerKey":"B"}
{"id":"MCAS_2004_5_14","answerKey":"A"}
{"id":"Mercury_SC_409024","answerKey":"A"}
{"id":"Mercury_7007928","answerKey":"D"}
{"id":"Mercury_7038850","answerKey":"A"}
{"id":"Mercury_400598","answerKey":"C"}
{"id":"Mercury_7239313","answerKey":"A"}
{"id":"Mercury_7011760","answerKey":"A"}
{"id":"Mercury_7008785","answerKey":"A"}
{"id":"Mercury_180863","answerKey":"D"}
{"id":"Mercury_7014560","answerKey":"C"}
{"id":"Mercury_SC_405059","answerKey":"A"}
{"id":"Mercury_179130","answerKey":"C"}
{"id":"MCAS_2011_5_17671","answerKey":"D"}
{"id":"NYSEDREGENTS_2013_4_14","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_8_6","answerKey":"3"}
{"id":"Mercury_SC_407227","answerKey":"D"}
{"id":"NYSEDREGENTS_2008_4_27","answerKey":"A"}
{"id":"Mercury_SC_401779","answerKey":"B"}
{"id":"OHAT_2008_5_30","answerKey":"C"}
{"id":"Mercury_SC_400181","answerKey":"C"}
{"id":"Mercury_7222670","answerKey":"C"}
{"id":"Mercury_7016328","answerKey":"C"}
{"id":"MEAP_2005_8_14","answerKey":"A"}
{"id":"TIMSS_2007_8_pg4","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_8_34","answerKey":"3"}
{"id":"Mercury_7202125","answerKey":"C"}
{"id":"Mercury_7242900","answerKey":"C"}
{"id":"Mercury_SC_400588","answerKey":"B"}
{"id":"Mercury_7282695","answerKey":"A"}
{"id":"Mercury_7018498","answerKey":"C"}
{"id":"CSZ20770","answerKey":"C"}
{"id":"Mercury_7174895","answerKey":"D"}
{"id":"Mercury_SC_400375","answerKey":"C"}
{"id":"Mercury_SC_415394","answerKey":"C"}
{"id":"Mercury_412693","answerKey":"D"}
{"id":"Mercury_7056665","answerKey":"C"}
{"id":"MCAS_2003_8_26","answerKey":"C"}
{"id":"TIMSS_2003_4_pg35","answerKey":"B"}
{"id":"Mercury_7218050","answerKey":"D"}
{"id":"Mercury_7010063","answerKey":"D"}
{"id":"Mercury_SC_405499","answerKey":"A"}
{"id":"Mercury_SC_401172","answerKey":"C"}
{"id":"Mercury_7056315","answerKey":"A"}
{"id":"Mercury_7271198","answerKey":"C"}
{"id":"Mercury_7248203","answerKey":"C"}
{"id":"TIMSS_1995_8_I16","answerKey":"A"}
{"id":"Mercury_7201163","answerKey":"C"}
{"id":"Mercury_7064698","answerKey":"A"}
{"id":"Mercury_7081550","answerKey":"B"}
{"id":"ACTAAP_2015_7_7","answerKey":"B"}
{"id":"Mercury_400278","answerKey":"D"}
{"id":"NYSEDREGENTS_2008_4_24","answerKey":"D"}
{"id":"LEAP__7_10341","answerKey":"B"}
{"id":"TIMSS_2003_4_pg8","answerKey":"A"}
{"id":"Mercury_SC_409577","answerKey":"B"}
{"id":"Mercury_7198993","answerKey":"C"}
{"id":"Mercury_SC_409272","answerKey":"C"}
{"id":"VASoL_2008_5_11","answerKey":"C"}
{"id":"Mercury_7199938","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_8_27","answerKey":"4"}
{"id":"Mercury_7264145","answerKey":"A"}
{"id":"Mercury_192990","answerKey":"A"}
{"id":"Mercury_405777","answerKey":"D"}
{"id":"Mercury_SC_409153","answerKey":"D"}
{"id":"VASoL_2009_3_27","answerKey":"A"}
{"id":"MDSA_2009_8_32","answerKey":"C"}
{"id":"Mercury_SC_400115","answerKey":"C"}
{"id":"MSA_2012_5_21","answerKey":"D"}
{"id":"NCEOGA_2013_5_24","answerKey":"C"}
{"id":"Mercury_7245245","answerKey":"C"}
{"id":"FCAT_2012_8_4","answerKey":"B"}
{"id":"Mercury_SC_402124","answerKey":"A"}
{"id":"Mercury_7219695","answerKey":"A"}
{"id":"Mercury_SC_402122","answerKey":"C"}
{"id":"Mercury_SC_405800","answerKey":"B"}
{"id":"Mercury_SC_400048","answerKey":"A"}
{"id":"Mercury_7163870","answerKey":"C"}
{"id":"MCAS_2006_9_2","answerKey":"C"}
{"id":"NYSEDREGENTS_2008_4_7","answerKey":"D"}
{"id":"Mercury_SC_401170","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_4_24","answerKey":"A"}
{"id":"Mercury_SC_401238","answerKey":"D"}
{"id":"Mercury_7218663","answerKey":"C"}
{"id":"Mercury_7024938","answerKey":"D"}
{"id":"Mercury_182263","answerKey":"C"}
{"id":"Mercury_7086800","answerKey":"C"}
{"id":"Mercury_7248308","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_8_31","answerKey":"4"}
{"id":"Mercury_7228078","answerKey":"C"}
{"id":"Mercury_SC_402040","answerKey":"D"}
{"id":"Mercury_7026758","answerKey":"B"}
{"id":"Mercury_SC_409676","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_8_9","answerKey":"1"}
{"id":"Mercury_SC_LBS10906","answerKey":"D"}
{"id":"MDSA_2010_8_38","answerKey":"D"}
{"id":"Mercury_416672","answerKey":"B"}
{"id":"ACTAAP_2014_7_12","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_4_26","answerKey":"A"}
{"id":"OHAT_2008_8_29","answerKey":"B"}
{"id":"Mercury_184170","answerKey":"C"}
{"id":"Mercury_405769","answerKey":"B"}
{"id":"Mercury_SC_400359","answerKey":"C"}
{"id":"Mercury_7011393","answerKey":"A"}
{"id":"Mercury_SC_409682","answerKey":"C"}
{"id":"Mercury_7220028","answerKey":"A"}
{"id":"MCAS_2011_5_15","answerKey":"C"}
{"id":"Mercury_SC_400217","answerKey":"D"}
{"id":"TIMSS_2011_4_pg7","answerKey":"D"}
{"id":"Mercury_7263515","answerKey":"B"}
{"id":"Mercury_7018463","answerKey":"B"}
{"id":"Mercury_SC_401833","answerKey":"D"}
{"id":"Mercury_187460","answerKey":"C"}
{"id":"Mercury_SC_409149","answerKey":"A"}
{"id":"Mercury_417589","answerKey":"C"}
{"id":"ACTAAP_2011_5_14","answerKey":"D"}
{"id":"Mercury_400837","answerKey":"A"}
{"id":"Mercury_405942","answerKey":"A"}
{"id":"Mercury_7186935","answerKey":"A"}
{"id":"MDSA_2011_8_13","answerKey":"D"}
{"id":"Mercury_7252263","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_8_45","answerKey":"1"}
{"id":"Mercury_412625","answerKey":"B"}
{"id":"Mercury_SC_406672","answerKey":"C"}
{"id":"TAKS_2009_8_12","answerKey":"B"}
{"id":"Mercury_7270270","answerKey":"C"}
{"id":"Mercury_SC_LBS11008","answerKey":"D"}
{"id":"Mercury_SC_414087","answerKey":"D"}
{"id":"MEA_2016_5_8","answerKey":"C"}
{"id":"ACTAAP_2014_5_13","answerKey":"A"}
{"id":"MEA_2014_5_3","answerKey":"C"}
{"id":"Mercury_SC_401125","answerKey":"D"}
{"id":"Mercury_7212625","answerKey":"B"}
{"id":"Mercury_7250285","answerKey":"C"}
{"id":"NYSEDREGENTS_2012_4_7","answerKey":"C"}
{"id":"Mercury_7168718","answerKey":"A"}
{"id":"MDSA_2012_8_36","answerKey":"C"}
{"id":"Mercury_7044065","answerKey":"D"}
{"id":"Mercury_SC_410619","answerKey":"C"}
{"id":"Mercury_SC_406026","answerKey":"B"}
{"id":"Mercury_SC_406024","answerKey":"B"}
{"id":"Mercury_406773","answerKey":"B"}
{"id":"Mercury_SC_400857","answerKey":"B"}
{"id":"MDSA_2008_8_29","answerKey":"C"}
{"id":"Mercury_SC_407219","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_4_8","answerKey":"A"}
{"id":"Mercury_7083405","answerKey":"A"}
{"id":"Mercury_SC_402079","answerKey":"D"}
{"id":"ACTAAP_2007_7_17","answerKey":"B"}
{"id":"Mercury_7093013","answerKey":"D"}
{"id":"TIMSS_2003_4_pg17","answerKey":"A"}
{"id":"Mercury_SC_416527","answerKey":"B"}
{"id":"MCAS_2005_9_9","answerKey":"A"}
{"id":"Mercury_7161403","answerKey":"A"}
{"id":"Mercury_SC_405062","answerKey":"C"}
{"id":"Mercury_7142748","answerKey":"B"}
{"id":"Mercury_7024675","answerKey":"D"}
{"id":"Mercury_7267575","answerKey":"C"}
{"id":"Mercury_7170905","answerKey":"B"}
{"id":"Mercury_400158","answerKey":"D"}
{"id":"MCAS_2011_5_11","answerKey":"A"}
{"id":"Mercury_SC_400595","answerKey":"D"}
{"id":"TIMSS_2011_4_pg58","answerKey":"C"}
{"id":"Mercury_7245578","answerKey":"C"}
{"id":"Mercury_7217280","answerKey":"D"}
{"id":"Mercury_405951","answerKey":"A"}
{"id":"Mercury_7017920","answerKey":"A"}
{"id":"Mercury_SC_415005","answerKey":"B"}
{"id":"MEA_2013_5_16","answerKey":"A"}
{"id":"Mercury_SC_401155","answerKey":"A"}
{"id":"TIMSS_2011_8_pg19","answerKey":"C"}
{"id":"Mercury_7013685","answerKey":"A"}
{"id":"Mercury_SC_401119","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_4_10","answerKey":"C"}
{"id":"MCAS_2016_5_16","answerKey":"B"}
{"id":"Mercury_178308","answerKey":"C"}
{"id":"Mercury_405107","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_8_28","answerKey":"3"}
{"id":"Mercury_400799","answerKey":"C"}
{"id":"Mercury_SC_401219","answerKey":"D"}
{"id":"VASoL_2009_5_22","answerKey":"D"}
{"id":"ACTAAP_2013_5_5","answerKey":"B"}
{"id":"NCEOGA_2013_5_27","answerKey":"D"}
{"id":"ACTAAP_2008_5_12","answerKey":"A"}
{"id":"ACTAAP_2008_7_17","answerKey":"D"}
{"id":"Mercury_7250110","answerKey":"B"}
{"id":"WASL_2005_5_10","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_8_24","answerKey":"3"}
{"id":"Mercury_7091858","answerKey":"D"}
{"id":"Mercury_405948","answerKey":"A"}
{"id":"OHAT_2009_8_8","answerKey":"B"}
{"id":"Mercury_SC_408425","answerKey":"B"}
{"id":"Mercury_SC_415489","answerKey":"C"}
{"id":"Mercury_7037678","answerKey":"D"}
{"id":"TIMSS_2011_4_pg14","answerKey":"D"}
{"id":"Mercury_SC_LBS10391","answerKey":"D"}
{"id":"CSZ_2009_8_CSZ30764","answerKey":"B"}
{"id":"Mercury_7040933","answerKey":"B"}
{"id":"Mercury_7175805","answerKey":"D"}
{"id":"Mercury_7128870","answerKey":"A"}
{"id":"Mercury_SC_406851","answerKey":"C"}
{"id":"Mercury_409822","answerKey":"C"}
{"id":"LEAP_2005_4_10266","answerKey":"B"}
{"id":"Mercury_7024360","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_4_10","answerKey":"C"}
{"id":"Mercury_SC_401141","answerKey":"A"}
{"id":"MCAS_2012_5_23619","answerKey":"B"}
{"id":"Mercury_416642","answerKey":"B"}
{"id":"Mercury_7210193","answerKey":"A"}
{"id":"Mercury_7057330","answerKey":"A"}
{"id":"Mercury_7250128","answerKey":"D"}
{"id":"Mercury_SC_LBS10338","answerKey":"A"}
{"id":"Mercury_7008960","answerKey":"D"}
{"id":"MEA_2010_8_13","answerKey":"D"}
{"id":"Mercury_SC_409595","answerKey":"C"}
{"id":"NYSEDREGENTS_2012_8_3","answerKey":"3"}
{"id":"Mercury_7008383","answerKey":"C"}
{"id":"Mercury_7248150","answerKey":"A"}
{"id":"Mercury_401240","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_30","answerKey":"2"}
{"id":"TIMSS_2011_4_pg102","answerKey":"B"}
{"id":"Mercury_SC_400693","answerKey":"D"}
{"id":"Mercury_7168140","answerKey":"C"}
{"id":"Mercury_175963","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_8_15","answerKey":"1"}
{"id":"Mercury_SC_400676","answerKey":"C"}
{"id":"Mercury_7007683","answerKey":"A"}
{"id":"LEAP_2004_4_10258","answerKey":"A"}
{"id":"Mercury_7268153","answerKey":"B"}
{"id":"Mercury_LBS10795","answerKey":"D"}
{"id":"Mercury_7176208","answerKey":"B"}
{"id":"Mercury_7206063","answerKey":"D"}
{"id":"Mercury_SC_415028","answerKey":"D"}
{"id":"Mercury_400471","answerKey":"D"}
{"id":"Mercury_7072625","answerKey":"B"}
{"id":"VASoL_2008_5_25","answerKey":"B"}
{"id":"Mercury_410275","answerKey":"B"}
{"id":"Mercury_SC_415413","answerKey":"A"}
{"id":"TIMSS_2007_4_pg34","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_4_17","answerKey":"D"}
{"id":"Mercury_7016853","answerKey":"B"}
{"id":"Mercury_SC_401817","answerKey":"B"}
{"id":"MDSA_2007_5_51","answerKey":"C"}
{"id":"ACTAAP_2010_7_17","answerKey":"D"}
{"id":"Mercury_7008155","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_8_19","answerKey":"3"}
{"id":"Mercury_7148208","answerKey":"A"}
{"id":"Mercury_SC_400709","answerKey":"B"}
{"id":"Mercury_412755","answerKey":"D"}
{"id":"Mercury_SC_401331","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_4_3","answerKey":"B"}
{"id":"Mercury_SC_413002","answerKey":"A"}
{"id":"Mercury_7042630","answerKey":"B"}
{"id":"MCAS_2003_5_10","answerKey":"D"}
{"id":"TIMSS_2007_4_pg105","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_4_13","answerKey":"B"}
{"id":"Mercury_7024203","answerKey":"A"}
{"id":"Mercury_7007543","answerKey":"C"}
{"id":"Mercury_7241255","answerKey":"A"}
{"id":"Mercury_7222408","answerKey":"B"}
{"id":"Mercury_7016240","answerKey":"A"}
{"id":"TIMSS_2007_8_pg57","answerKey":"C"}
{"id":"MDSA_2007_8_23","answerKey":"C"}
{"id":"MEA_2016_5_3","answerKey":"B"}
{"id":"MEAP_2005_5_16","answerKey":"C"}
{"id":"Mercury_7009818","answerKey":"A"}
{"id":"MCAS_2004_5_22","answerKey":"A"}
{"id":"ACTAAP_2007_7_27","answerKey":"B"}
{"id":"MDSA_2008_5_25","answerKey":"A"}
{"id":"TIMSS_1995_8_L5","answerKey":"B"}
{"id":"Mercury_7217438","answerKey":"D"}
{"id":"Mercury_411809","answerKey":"B"}
{"id":"Mercury_7188213","answerKey":"D"}
{"id":"Mercury_SC_401800","answerKey":"B"}
{"id":"Mercury_SC_406273","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_8_7","answerKey":"3"}
{"id":"Mercury_7174668","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_8_16","answerKey":"1"}
{"id":"MCAS_2005_5_33","answerKey":"D"}
{"id":"Mercury_7056648","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_8_43","answerKey":"1"}
{"id":"AKDE&ED_2008_8_50","answerKey":"B"}
{"id":"Mercury_7205363","answerKey":"A"}
{"id":"Mercury_SC_407507","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_8_23","answerKey":"2"}
{"id":"TAKS_2009_5_20","answerKey":"C"}
{"id":"MDSA_2010_8_8","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_8_13","answerKey":"2"}
{"id":"NYSEDREGENTS_2013_8_8","answerKey":"1"}
{"id":"MCAS_2011_5_10","answerKey":"A"}
{"id":"ACTAAP_2014_7_10","answerKey":"B"}
{"id":"Mercury_SC_408435","answerKey":"A"}
{"id":"Mercury_7245788","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_8_25","answerKey":"3"}
{"id":"MCAS_2004_8_20","answerKey":"A"}
{"id":"Mercury_7207463","answerKey":"B"}
{"id":"OHAT_2007_5_40","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_8_33","answerKey":"3"}
{"id":"MSA_2012_5_13","answerKey":"C"}
{"id":"Mercury_7211015","answerKey":"D"}
{"id":"MCAS_1999_8_17","answerKey":"D"}
{"id":"Mercury_7071978","answerKey":"B"}
{"id":"Mercury_414500","answerKey":"D"}
{"id":"Mercury_7014000","answerKey":"A"}
{"id":"Mercury_SC_400224","answerKey":"B"}
{"id":"NCEOGA_2013_8_29","answerKey":"C"}
{"id":"LEAP_2002_8_10389","answerKey":"D"}
{"id":"Mercury_416507","answerKey":"C"}
{"id":"VASoL_2008_3_35","answerKey":"A"}
{"id":"Mercury_7010798","answerKey":"C"}
{"id":"Mercury_7018078","answerKey":"B"}
{"id":"Mercury_7141278","answerKey":"A"}
{"id":"Mercury_SC_400988","answerKey":"D"}
{"id":"Mercury_SC_415415","answerKey":"A"}
{"id":"NAEP_2009_4_S7+6","answerKey":"D"}
{"id":"Mercury_SC_401296","answerKey":"A"}
{"id":"Mercury_SC_400406","answerKey":"B"}
{"id":"Mercury_7023275","answerKey":"B"}
{"id":"Mercury_SC_405020","answerKey":"D"}
{"id":"Mercury_405940","answerKey":"D"}
{"id":"Mercury_SC_405734","answerKey":"C"}
{"id":"MCAS_2005_8_20","answerKey":"B"}
{"id":"Mercury_7197960","answerKey":"A"}
{"id":"TIMSS_2003_8_pg57","answerKey":"B"}
{"id":"TAKS_2009_5_29","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_8","answerKey":"4"}
{"id":"Mercury_7219030","answerKey":"B"}
{"id":"Mercury_7141890","answerKey":"B"}
{"id":"Mercury_7075215","answerKey":"C"}
{"id":"Mercury_SC_416173","answerKey":"C"}
{"id":"Mercury_7283885","answerKey":"D"}
{"id":"MCAS_1998_4_7","answerKey":"D"}
{"id":"OHAT_2007_8_42","answerKey":"B"}
{"id":"AKDE&ED_2012_8_7","answerKey":"D"}
{"id":"Mercury_LBS10993","answerKey":"C"}
{"id":"Mercury_7084018","answerKey":"C"}
{"id":"Mercury_SC_415399","answerKey":"A"}
{"id":"Mercury_7071803","answerKey":"A"}
{"id":"MCAS_2011_8_17683","answerKey":"C"}
{"id":"Mercury_SC_408439","answerKey":"A"}
{"id":"Mercury_7115290","answerKey":"C"}
{"id":"Mercury_7007613","answerKey":"D"}
{"id":"Mercury_7161298","answerKey":"B"}
{"id":"Mercury_180198","answerKey":"B"}
{"id":"MEAP_2004_8_1","answerKey":"B"}
{"id":"LEAP_2001_4_10240","answerKey":"D"}
{"id":"AKDE&ED_2012_4_2","answerKey":"C"}
{"id":"AKDE&ED_2008_8_18","answerKey":"A"}
{"id":"Mercury_400347","answerKey":"A"}
{"id":"Mercury_7001313","answerKey":"D"}
{"id":"NYSEDREGENTS_2008_8_18","answerKey":"4"}
{"id":"Mercury_7026810","answerKey":"C"}
{"id":"Mercury_SC_415352","answerKey":"A"}
{"id":"Mercury_SC_413632","answerKey":"B"}
{"id":"Mercury_SC_408881","answerKey":"C"}
{"id":"MSA_2013_5_15","answerKey":"C"}
{"id":"Mercury_SC_401316","answerKey":"C"}
{"id":"TIMSS_2003_8_pg99","answerKey":"C"}
{"id":"Mercury_405056","answerKey":"A"}
{"id":"Mercury_7113978","answerKey":"C"}
{"id":"Mercury_7041388","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_4_23","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_8_1","answerKey":"4"}
{"id":"Mercury_7238893","answerKey":"D"}
{"id":"Mercury_7018095","answerKey":"D"}
{"id":"Mercury_SC_408344","answerKey":"B"}
{"id":"Mercury_7171955","answerKey":"A"}
{"id":"Mercury_7013895","answerKey":"B"}
{"id":"MCAS_2011_8_17682","answerKey":"C"}
{"id":"Mercury_7194320","answerKey":"A"}
{"id":"Mercury_7029785","answerKey":"A"}
{"id":"Mercury_7284008","answerKey":"A"}
{"id":"Mercury_402093","answerKey":"A"}
{"id":"Mercury_401313","answerKey":"D"}
{"id":"Mercury_SC_405490","answerKey":"D"}
{"id":"Mercury_7160528","answerKey":"D"}
{"id":"TIMSS_2007_8_pg130","answerKey":"D"}
{"id":"Mercury_7083773","answerKey":"B"}
{"id":"Mercury_7188860","answerKey":"D"}
{"id":"NCEOGA_2013_5_14","answerKey":"D"}
{"id":"Mercury_7113803","answerKey":"A"}
{"id":"Mercury_SC_401136","answerKey":"B"}
{"id":"LEAP__7_10342","answerKey":"B"}
{"id":"Mercury_SC_405838","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_8_26","answerKey":"1"}
{"id":"MCAS_1999_4_15","answerKey":"B"}
{"id":"Mercury_SC_414155","answerKey":"D"}
{"id":"CSZ_2008_8_12","answerKey":"C"}
{"id":"CSZ_2005_5_CSZ20517","answerKey":"D"}
{"id":"Mercury_SC_406543","answerKey":"B"}
{"id":"Mercury_SC_406688","answerKey":"D"}
{"id":"Mercury_7205748","answerKey":"D"}
{"id":"Mercury_7007473","answerKey":"A"}
{"id":"AKDE&ED_2012_4_14","answerKey":"C"}
{"id":"Mercury_7008348","answerKey":"D"}
{"id":"Mercury_SC_401838","answerKey":"C"}
{"id":"Mercury_7018428","answerKey":"B"}
{"id":"Mercury_SC_LBS10272","answerKey":"C"}
{"id":"Mercury_411737","answerKey":"C"}
{"id":"MCAS_2011_8_17696","answerKey":"D"}
{"id":"Mercury_400060","answerKey":"C"}
{"id":"Mercury_7094395","answerKey":"B"}
{"id":"Mercury_402634","answerKey":"D"}
{"id":"Mercury_7218908","answerKey":"A"}
{"id":"Mercury_7037275","answerKey":"B"}
{"id":"LEAP__8_10366","answerKey":"C"}
{"id":"Mercury_7012495","answerKey":"D"}
{"id":"Mercury_7210350","answerKey":"B"}
{"id":"Mercury_SC_LBS10270","answerKey":"A"}
{"id":"Mercury_SC_405090","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_8_34","answerKey":"4"}
{"id":"Mercury_402102","answerKey":"B"}
{"id":"Mercury_7220430","answerKey":"C"}
{"id":"AKDE&ED_2008_8_31","answerKey":"D"}
{"id":"TAKS_2009_8_29","answerKey":"A"}
{"id":"Mercury_7137480","answerKey":"C"}
{"id":"MCAS_2015_8_20","answerKey":"C"}
{"id":"Mercury_7069388","answerKey":"C"}
{"id":"TIMSS_2003_4_pg87","answerKey":"C"}
{"id":"Mercury_7194495","answerKey":"D"}
{"id":"OHAT_2008_8_15","answerKey":"B"}
{"id":"AKDE&ED_2008_8_49","answerKey":"A"}
{"id":"Mercury_SC_408746","answerKey":"D"}
{"id":"ACTAAP_2009_7_9","answerKey":"D"}
{"id":"TIMSS_2011_8_pg98","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_4_7","answerKey":"A"}
{"id":"Mercury_7263655","answerKey":"A"}
{"id":"VASoL_2011_5_16","answerKey":"D"}
{"id":"Mercury_SC_400021","answerKey":"C"}
{"id":"VASoL_2009_5_36","answerKey":"D"}
{"id":"Mercury_SC_405154","answerKey":"B"}
{"id":"Mercury_7128853","answerKey":"A"}
{"id":"Mercury_7044240","answerKey":"D"}
{"id":"Mercury_407661","answerKey":"D"}
{"id":"Mercury_7234518","answerKey":"C"}
{"id":"Mercury_7179340","answerKey":"D"}
{"id":"OHAT_2007_8_44","answerKey":"C"}
{"id":"Mercury_7099225","answerKey":"D"}
{"id":"Mercury_SC_400183","answerKey":"D"}
{"id":"Mercury_7206623","answerKey":"C"}
{"id":"Mercury_7026513","answerKey":"D"}
{"id":"Mercury_402092","answerKey":"C"}
{"id":"VASoL_2007_5_31","answerKey":"C"}
{"id":"Mercury_409111","answerKey":"B"}
{"id":"AKDE&ED_2012_8_1","answerKey":"C"}
{"id":"MCAS_2006_9_5","answerKey":"B"}
{"id":"MDSA_2007_5_57","answerKey":"D"}
{"id":"Mercury_SC_408628","answerKey":"C"}
{"id":"Mercury_SC_401310","answerKey":"B"}
{"id":"Mercury_SC_409172","answerKey":"A"}
{"id":"Mercury_7211628","answerKey":"B"}
{"id":"Mercury_412777","answerKey":"D"}
{"id":"MCAS_2000_4_35","answerKey":"B"}
{"id":"Mercury_SC_401358","answerKey":"C"}
{"id":"Mercury_7181633","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_6","answerKey":"4"}
{"id":"Mercury_411027","answerKey":"B"}
{"id":"Mercury_7267943","answerKey":"C"}
{"id":"Mercury_SC_406467","answerKey":"A"}
{"id":"MCAS_2006_9_29","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_4_22","answerKey":"B"}
{"id":"MEAP_2005_8_16","answerKey":"B"}
{"id":"Mercury_SC_402074","answerKey":"D"}
{"id":"Mercury_7056543","answerKey":"D"}
{"id":"MCAS_2000_4_11","answerKey":"A"}
{"id":"Mercury_7219118","answerKey":"C"}
{"id":"Mercury_7043873","answerKey":"B"}
{"id":"Mercury_7207060","answerKey":"C"}
{"id":"Mercury_7090563","answerKey":"A"}
{"id":"Mercury_SC_402623","answerKey":"B"}
{"id":"Mercury_SC_416138","answerKey":"C"}
{"id":"Mercury_SC_405510","answerKey":"D"}
{"id":"MDSA_2013_8_33","answerKey":"D"}
{"id":"Mercury_7271373","answerKey":"C"}
{"id":"Mercury_400629","answerKey":"B"}
{"id":"MEA_2010_8_9","answerKey":"D"}
{"id":"Mercury_7217595","answerKey":"A"}
{"id":"MEA_2010_8_6-v1","answerKey":"A"}
{"id":"AKDE&ED_2008_8_39","answerKey":"C"}
{"id":"Mercury_406546","answerKey":"B"}
{"id":"AKDE&ED_2012_8_15","answerKey":"C"}
{"id":"Mercury_SC_400610","answerKey":"C"}
{"id":"Mercury_SC_400532","answerKey":"A"}
{"id":"MCAS_2005_8_1","answerKey":"C"}
{"id":"Mercury_400620","answerKey":"B"}
{"id":"MCAS_8_2015_12","answerKey":"C"}
{"id":"Mercury_7234623","answerKey":"D"}
{"id":"Mercury_7211260","answerKey":"C"}
{"id":"MCAS_2001_8_19","answerKey":"C"}
{"id":"Mercury_SC_402117","answerKey":"A"}
{"id":"TIMSS_2007_8_pg29","answerKey":"D"}
{"id":"Mercury_SC_401126","answerKey":"D"}
{"id":"Mercury_7267488","answerKey":"A"}
{"id":"OHAT_2010_8_29","answerKey":"A"}
{"id":"Mercury_7132370","answerKey":"A"}
{"id":"Mercury_7162803","answerKey":"D"}
{"id":"Mercury_7026530","answerKey":"A"}
{"id":"MCAS_2012_8_23653","answerKey":"B"}
{"id":"CSZ_2009_8_CSZ30585","answerKey":"A"}
{"id":"MCAS_8_2014_13","answerKey":"D"}
{"id":"Mercury_416377","answerKey":"C"}
{"id":"Mercury_407668","answerKey":"C"}
{"id":"Mercury_SC_406664","answerKey":"C"}
{"id":"Mercury_7188510","answerKey":"B"}
{"id":"Mercury_7146178","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_4_16","answerKey":"B"}
{"id":"Mercury_SC_408740","answerKey":"C"}
{"id":"Mercury_SC_401811","answerKey":"D"}
{"id":"MCAS_1999_8_23","answerKey":"C"}
{"id":"Mercury_7124268","answerKey":"D"}
{"id":"Mercury_SC_400663","answerKey":"A"}
{"id":"Mercury_7207078","answerKey":"C"}
{"id":"MCAS_2003_8_10","answerKey":"B"}
{"id":"Mercury_SC_416136","answerKey":"D"}
{"id":"NAEP_2000_8_S11+5","answerKey":"B"}
{"id":"Mercury_SC_400376","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_8_19","answerKey":"4"}
{"id":"LEAP__7_10345","answerKey":"B"}
{"id":"Mercury_7161473","answerKey":"C"}
{"id":"MCAS_2006_9_42","answerKey":"A"}
{"id":"Mercury_410467","answerKey":"D"}
{"id":"Mercury_SC_416161","answerKey":"A"}
{"id":"Mercury_404614","answerKey":"C"}
{"id":"Mercury_7220833","answerKey":"C"}
{"id":"MCAS_2005_9_19-v1","answerKey":"D"}
{"id":"Mercury_SC_407376","answerKey":"C"}
{"id":"ACTAAP_2008_5_5","answerKey":"D"}
{"id":"Mercury_7220010","answerKey":"A"}
{"id":"Mercury_7041878","answerKey":"C"}
{"id":"Mercury_LBS10126","answerKey":"A"}
{"id":"Mercury_7030783","answerKey":"A"}
{"id":"Mercury_SC_402642","answerKey":"D"}
{"id":"Mercury_SC_401269","answerKey":"B"}
{"id":"Mercury_7162785","answerKey":"B"}
{"id":"TIMSS_2007_8_pg102","answerKey":"A"}
{"id":"AKDE&ED_2008_8_30","answerKey":"A"}
{"id":"Mercury_7032393","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_4_23","answerKey":"D"}
{"id":"Mercury_SC_LBS10177","answerKey":"D"}
{"id":"TIMSS_1995_8_L3","answerKey":"D"}
{"id":"TIMSS_2003_4_pg25","answerKey":"B"}
{"id":"Mercury_416637","answerKey":"A"}
{"id":"MEAP_2005_8_1","answerKey":"A"}
{"id":"Mercury_LBS10287","answerKey":"D"}
{"id":"Mercury_7107993","answerKey":"C"}
{"id":"Mercury_7238928","answerKey":"D"}
{"id":"NAEP_2005_8_S13+8","answerKey":"D"}
{"id":"Mercury_SC_400306","answerKey":"B"}
{"id":"Mercury_7173880","answerKey":"D"}
{"id":"ACTAAP_2015_7_1","answerKey":"D"}
{"id":"Mercury_SC_401275","answerKey":"C"}
{"id":"Mercury_SC_400361","answerKey":"B"}
{"id":"Mercury_7026775","answerKey":"C"}
{"id":"Mercury_7284095","answerKey":"B"}
{"id":"Mercury_SC_LBS10901","answerKey":"C"}
{"id":"Mercury_7123445","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_11","answerKey":"C"}
{"id":"MCAS_2006_9_40","answerKey":"C"}
{"id":"Mercury_SC_402270","answerKey":"D"}
{"id":"VASoL_2007_3_15","answerKey":"C"}
{"id":"Mercury_7138863","answerKey":"B"}
{"id":"Mercury_SC_415738","answerKey":"A"}
{"id":"Mercury_7009573","answerKey":"B"}
{"id":"Mercury_SC_402053","answerKey":"D"}
{"id":"MCAS_2012_8_23650","answerKey":"B"}
{"id":"MCAS_2005_5_12","answerKey":"C"}
{"id":"TAKS_2009_5_21","answerKey":"B"}
{"id":"VASoL_2008_5_26","answerKey":"C"}
{"id":"ACTAAP_2014_7_2","answerKey":"D"}
{"id":"Mercury_SC_400523","answerKey":"D"}
{"id":"Mercury_SC_402094","answerKey":"B"}
{"id":"Mercury_7122500","answerKey":"B"}
{"id":"Mercury_7107310","answerKey":"C"}
{"id":"Mercury_7007840","answerKey":"C"}
{"id":"AIMS_2008_8_12","answerKey":"B"}
{"id":"Mercury_SC_415350","answerKey":"A"}
{"id":"MCAS_2000_8_17","answerKey":"A"}
{"id":"Mercury_SC_404998","answerKey":"A"}
{"id":"Mercury_SC_400600","answerKey":"C"}
{"id":"Mercury_SC_401611","answerKey":"C"}
{"id":"NCEOGA_2013_8_53","answerKey":"B"}
{"id":"Mercury_7077490","answerKey":"B"}
{"id":"AKDE&ED_2008_8_38","answerKey":"A"}
{"id":"VASoL_2008_3_38","answerKey":"C"}
{"id":"Mercury_7251720","answerKey":"D"}
{"id":"Mercury_405461","answerKey":"C"}
{"id":"Mercury_404899","answerKey":"C"}
{"id":"Mercury_404107","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_4_10","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_4_19","answerKey":"C"}
{"id":"Mercury_405852","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_4_18","answerKey":"D"}
{"id":"Mercury_7166950","answerKey":"A"}
{"id":"Mercury_SC_400065","answerKey":"C"}
{"id":"MDSA_2009_8_29","answerKey":"C"}
{"id":"Mercury_417461","answerKey":"D"}
{"id":"Mercury_7271163","answerKey":"A"}
{"id":"Mercury_7139720","answerKey":"D"}
{"id":"Mercury_400256","answerKey":"B"}
{"id":"Mercury_SC_LBS10618","answerKey":"A"}
{"id":"NCEOGA_2013_8_39","answerKey":"D"}
{"id":"Mercury_SC_400603","answerKey":"A"}
{"id":"Mercury_7174003","answerKey":"B"}
{"id":"Mercury_SC_401629","answerKey":"C"}
{"id":"Mercury_7037345","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_4_12","answerKey":"A"}
{"id":"Mercury_7112805","answerKey":"A"}
{"id":"Mercury_SC_405482","answerKey":"C"}
{"id":"TIMSS_2003_4_pg81","answerKey":"D"}
{"id":"MDSA_2011_8_33","answerKey":"A"}
{"id":"Mercury_7137130","answerKey":"C"}
{"id":"ACTAAP_2007_7_26","answerKey":"B"}
{"id":"Mercury_SC_413135","answerKey":"C"}
{"id":"Mercury_409065","answerKey":"C"}
{"id":"Mercury_7230598","answerKey":"C"}
{"id":"Mercury_7131023","answerKey":"D"}
{"id":"AKDE&ED_2012_8_48","answerKey":"A"}
{"id":"Mercury_7085558","answerKey":"C"}
{"id":"CSZ20228","answerKey":"C"}
{"id":"Mercury_7250005","answerKey":"C"}
{"id":"Mercury_SC_408039","answerKey":"D"}
{"id":"LEAP_2006_4_10275","answerKey":"C"}
{"id":"MCAS_2004_5_8","answerKey":"C"}
{"id":"Mercury_401643","answerKey":"B"}
{"id":"MCAS_8_2015_4","answerKey":"A"}
{"id":"Mercury_186568","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_4_16","answerKey":"C"}
{"id":"Mercury_7210630","answerKey":"C"}
{"id":"Mercury_SC_401264","answerKey":"C"}
{"id":"Mercury_SC_406016","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_4_13","answerKey":"B"}
{"id":"Mercury_SC_402071","answerKey":"C"}
{"id":"Mercury_7025130","answerKey":"A"}
{"id":"MEA_2013_8_19","answerKey":"C"}
{"id":"Mercury_416581","answerKey":"A"}
{"id":"Mercury_410593","answerKey":"C"}
{"id":"Mercury_SC_400842","answerKey":"D"}
{"id":"Mercury_SC_405881","answerKey":"D"}
{"id":"Mercury_SC_409563","answerKey":"A"}
{"id":"Mercury_7206133","answerKey":"D"}
{"id":"MCAS_2006_5_1","answerKey":"A"}
{"id":"Mercury_7200585","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_4_18","answerKey":"B"}
{"id":"Mercury_7083738","answerKey":"A"}
{"id":"Mercury_7247083","answerKey":"C"}
{"id":"Mercury_182945","answerKey":"D"}
{"id":"Mercury_7200148","answerKey":"D"}
{"id":"MEA_2016_8_18","answerKey":"D"}
{"id":"MCAS_2001_5_15","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_4_15","answerKey":"C"}
{"id":"MCAS_2013_8_29418","answerKey":"D"}
{"id":"NCEOGA_2013_8_13","answerKey":"C"}
{"id":"Mercury_177153","answerKey":"D"}
{"id":"Mercury_7228305","answerKey":"D"}
{"id":"Mercury_SC_405856","answerKey":"A"}
{"id":"Mercury_SC_401000","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_4_9","answerKey":"D"}
{"id":"TIMSS_2011_8_pg25","answerKey":"C"}
{"id":"Mercury_7004795","answerKey":"B"}
{"id":"TIMSS_2007_4_pg64","answerKey":"A"}
{"id":"VASoL_2008_5_14","answerKey":"B"}
{"id":"Mercury_416636","answerKey":"D"}
{"id":"TIMSS_2003_4_pg11","answerKey":"D"}
{"id":"Mercury_SC_401129","answerKey":"B"}
{"id":"Mercury_7263095","answerKey":"A"}
{"id":"VASoL_2009_3_25","answerKey":"A"}
{"id":"MCAS_1999_8_28","answerKey":"D"}
{"id":"VASoL_2009_3_28","answerKey":"B"}
{"id":"Mercury_7191188","answerKey":"D"}
{"id":"Mercury_7027545","answerKey":"D"}
{"id":"Mercury_SC_408884","answerKey":"D"}
{"id":"Mercury_SC_405865","answerKey":"B"}
{"id":"Mercury_SC_401225","answerKey":"D"}
{"id":"LEAP_2008_4_10286","answerKey":"A"}
{"id":"Mercury_7283343","answerKey":"D"}
{"id":"Mercury_7123533","answerKey":"A"}
{"id":"Mercury_7008260","answerKey":"C"}
{"id":"Mercury_SC_400658","answerKey":"D"}
{"id":"NYSEDREGENTS_2013_8_7","answerKey":"2"}
{"id":"Mercury_SC_415454","answerKey":"B"}
{"id":"Mercury_SC_405340","answerKey":"B"}
{"id":"Mercury_SC_407371","answerKey":"A"}
{"id":"Mercury_7211505","answerKey":"A"}
{"id":"Mercury_SC_400114","answerKey":"A"}
{"id":"Mercury_7040758","answerKey":"B"}
{"id":"Mercury_7043943","answerKey":"A"}
{"id":"Mercury_401246","answerKey":"B"}
{"id":"MCAS_2010_8_12003","answerKey":"B"}
{"id":"AKDE&ED_2008_4_33","answerKey":"C"}
{"id":"CSZ20823","answerKey":"C"}
{"id":"ACTAAP_2010_5_8","answerKey":"A"}
{"id":"Mercury_7001278","answerKey":"D"}
{"id":"Mercury_SC_LBS10952","answerKey":"B"}
{"id":"Mercury_SC_407606","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_8_31","answerKey":"2"}
{"id":"Mercury_7044713","answerKey":"C"}
{"id":"Mercury_7056910","answerKey":"B"}
{"id":"Mercury_SC_LBS10390","answerKey":"C"}
{"id":"VASoL_2008_5_16","answerKey":"B"}
{"id":"Mercury_7005513","answerKey":"D"}
{"id":"Mercury_7092488","answerKey":"B"}
{"id":"MCAS_2016_5_2","answerKey":"D"}
{"id":"Mercury_7220973","answerKey":"A"}
{"id":"Mercury_7016520","answerKey":"C"}
{"id":"Mercury_SC_413638","answerKey":"A"}
{"id":"Mercury_7236618","answerKey":"C"}
{"id":"Mercury_7183523","answerKey":"A"}
{"id":"Mercury_7043890","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_4_4","answerKey":"D"}
{"id":"MEA_2016_8_19","answerKey":"C"}
{"id":"Mercury_7080973","answerKey":"B"}
{"id":"VASoL_2011_5_25","answerKey":"D"}
{"id":"Mercury_7215548","answerKey":"B"}
{"id":"Mercury_7239365","answerKey":"C"}
{"id":"Mercury_7223423","answerKey":"D"}
{"id":"Mercury_7081288","answerKey":"B"}
{"id":"Mercury_400198","answerKey":"D"}
{"id":"OHAT_2007_8_43","answerKey":"A"}
{"id":"Mercury_LBS10817","answerKey":"C"}
{"id":"Mercury_189753","answerKey":"B"}
{"id":"OHAT_2011_5_37","answerKey":"A"}
{"id":"Mercury_7247048","answerKey":"B"}
{"id":"CSZ20334","answerKey":"A"}
{"id":"Mercury_7195178","answerKey":"B"}
{"id":"Mercury_7246278","answerKey":"D"}
{"id":"Mercury_7015908","answerKey":"D"}
{"id":"Mercury_7235603","answerKey":"D"}
{"id":"Mercury_7097440","answerKey":"C"}
{"id":"Mercury_SC_417579","answerKey":"D"}
{"id":"NCEOGA_2013_8_56","answerKey":"B"}
{"id":"Mercury_7217333","answerKey":"C"}
{"id":"Mercury_SC_409574","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_8_40","answerKey":"1"}
{"id":"Mercury_7216913","answerKey":"D"}
{"id":"ACTAAP_2008_5_13","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_8_11","answerKey":"4"}
{"id":"MCAS_2004_8_35","answerKey":"C"}
{"id":"Mercury_SC_413004","answerKey":"A"}
{"id":"Mercury_7014385","answerKey":"C"}
{"id":"NYSEDREGENTS_2008_4_4","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_4_26","answerKey":"D"}
{"id":"ACTAAP_2014_5_4","answerKey":"C"}
{"id":"TIMSS_2003_8_pg43","answerKey":"C"}
{"id":"ACTAAP_2013_7_8","answerKey":"C"}
{"id":"CSZ_2009_8_CSZ30651","answerKey":"A"}
{"id":"Mercury_SC_LBS10688","answerKey":"C"}
{"id":"NCEOGA_2013_5_S2","answerKey":"B"}
{"id":"Mercury_SC_410835","answerKey":"C"}
{"id":"ACTAAP_2008_7_7","answerKey":"C"}
{"id":"Mercury_7015890","answerKey":"B"}
{"id":"Mercury_7077665","answerKey":"D"}
{"id":"Mercury_7234168","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_4_29","answerKey":"A"}
{"id":"Mercury_7188353","answerKey":"A"}
{"id":"MCAS_2006_5_21","answerKey":"C"}
{"id":"Mercury_SC_409157","answerKey":"A"}
{"id":"MEA_2016_8_20","answerKey":"A"}
{"id":"Mercury_7084245","answerKey":"C"}
{"id":"Mercury_SC_405137","answerKey":"C"}
{"id":"VASoL_2010_3_2","answerKey":"C"}
{"id":"Mercury_7008033","answerKey":"D"}
{"id":"Mercury_SC_400987","answerKey":"B"}
{"id":"MDSA_2008_5_29","answerKey":"B"}
{"id":"Mercury_7041055","answerKey":"B"}
{"id":"NYSEDREGENTS_2013_4_7","answerKey":"C"}
{"id":"AKDE&ED_2008_4_8","answerKey":"B"}
{"id":"Mercury_SC_402064","answerKey":"D"}
{"id":"Mercury_404898","answerKey":"A"}
{"id":"Mercury_177678","answerKey":"D"}
{"id":"Mercury_SC_LBS10606","answerKey":"D"}
{"id":"Mercury_SC_402630","answerKey":"D"}
{"id":"Mercury_416586","answerKey":"A"}
{"id":"MCAS_2006_9_38","answerKey":"B"}
{"id":"Mercury_SC_415469","answerKey":"B"}
{"id":"MDSA_2009_8_12","answerKey":"A"}
{"id":"Mercury_7015803","answerKey":"A"}
{"id":"Mercury_414146","answerKey":"A"}
{"id":"Mercury_405895","answerKey":"C"}
{"id":"Mercury_7063980","answerKey":"A"}
{"id":"Mercury_7029855","answerKey":"B"}
{"id":"Mercury_404720","answerKey":"D"}
{"id":"Mercury_7071960","answerKey":"A"}
{"id":"Mercury_7004778","answerKey":"A"}
{"id":"Mercury_7263848","answerKey":"C"}
{"id":"Mercury_SC_402260","answerKey":"C"}
{"id":"Mercury_SC_LBS10041","answerKey":"D"}
{"id":"Mercury_SC_401786","answerKey":"C"}
{"id":"ACTAAP_2011_5_13","answerKey":"A"}
{"id":"LEAP__4_10227","answerKey":"D"}
{"id":"Mercury_7195125","answerKey":"D"}
{"id":"LEAP__8_10368","answerKey":"C"}
{"id":"Mercury_SC_401307","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_8_8","answerKey":"4"}
{"id":"TIMSS_2003_8_pg40","answerKey":"B"}
{"id":"MDSA_2008_8_38","answerKey":"A"}
{"id":"MCAS_2012_5_22237","answerKey":"C"}
{"id":"Mercury_7223948","answerKey":"B"}
{"id":"Mercury_7263428","answerKey":"C"}
{"id":"Mercury_SC_402983","answerKey":"A"}
{"id":"MCAS_2012_8_23648","answerKey":"D"}
{"id":"Mercury_LBS10976","answerKey":"A"}
{"id":"Mercury_SC_408925","answerKey":"C"}
{"id":"MDSA_2009_8_30","answerKey":"D"}
{"id":"NAEP_2000_8_S21+3","answerKey":"D"}
{"id":"Mercury_7106785","answerKey":"C"}
{"id":"Mercury_SC_400193","answerKey":"B"}
{"id":"Mercury_7029313","answerKey":"C"}
{"id":"MCAS_2000_8_23","answerKey":"A"}
{"id":"Mercury_7001435","answerKey":"B"}
{"id":"Mercury_SC_402031","answerKey":"C"}
{"id":"MDSA_2008_8_39","answerKey":"A"}
{"id":"Mercury_SC_LBS10619","answerKey":"C"}
{"id":"Mercury_SC_414361","answerKey":"D"}
{"id":"Mercury_400089","answerKey":"C"}
{"id":"Mercury_7165690","answerKey":"D"}
{"id":"LEAP_2006_8_10411","answerKey":"A"}
{"id":"Mercury_SC_408984","answerKey":"C"}
{"id":"Mercury_SC_400173","answerKey":"A"}
{"id":"Mercury_7201058","answerKey":"D"}
{"id":"Mercury_7011288","answerKey":"D"}
{"id":"MCAS_1999_4_16","answerKey":"A"}
{"id":"Mercury_7168613","answerKey":"C"}
{"id":"Mercury_400574","answerKey":"D"}
{"id":"MDSA_2010_5_3","answerKey":"A"}
{"id":"Mercury_401785","answerKey":"B"}
{"id":"Mercury_7145548","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_8_35","answerKey":"4"}
{"id":"Mercury_7141733","answerKey":"D"}
{"id":"Mercury_SC_LBS10388","answerKey":"B"}
{"id":"Mercury_SC_401615","answerKey":"A"}
{"id":"Mercury_7007858","answerKey":"A"}
{"id":"Mercury_7212940","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_4_17","answerKey":"D"}
{"id":"Mercury_7110968","answerKey":"A"}
{"id":"Mercury_7174143","answerKey":"C"}
{"id":"Mercury_7191520","answerKey":"B"}
{"id":"Mercury_7027108","answerKey":"A"}
{"id":"AKDE&ED_2008_8_51","answerKey":"A"}
{"id":"MCAS_2012_8_23639","answerKey":"B"}
{"id":"VASoL_2007_5_30","answerKey":"D"}
{"id":"AMP_2016_8_48","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_8_21","answerKey":"2"}
{"id":"Mercury_7220465","answerKey":"B"}
{"id":"Mercury_404086","answerKey":"D"}
{"id":"MDSA_2007_8_6","answerKey":"B"}
{"id":"Mercury_7179288","answerKey":"B"}
{"id":"Mercury_7163363","answerKey":"D"}
{"id":"Mercury_SC_405883","answerKey":"D"}
{"id":"Mercury_7081655","answerKey":"B"}
{"id":"Mercury_176820","answerKey":"C"}
{"id":"Mercury_SC_400583","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_8_37","answerKey":"2"}
{"id":"NYSEDREGENTS_2008_4_20","answerKey":"B"}
{"id":"Mercury_7085383","answerKey":"D"}
{"id":"Mercury_7268275","answerKey":"A"}
{"id":"MSA_2012_8_28","answerKey":"C"}
{"id":"Mercury_7213045","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_4_22","answerKey":"B"}
{"id":"Mercury_416684","answerKey":"A"}
{"id":"Mercury_184643","answerKey":"D"}
{"id":"Mercury_SC_408871","answerKey":"B"}
{"id":"MSA_2012_5_16","answerKey":"B"}
{"id":"Mercury_SC_415078","answerKey":"A"}
{"id":"AIMS_2009_4_20","answerKey":"A"}
{"id":"Mercury_7094938","answerKey":"D"}
{"id":"OHAT_2008_5_26","answerKey":"A"}
{"id":"Mercury_SC_415417","answerKey":"A"}
{"id":"MCAS_2006_9_7","answerKey":"A"}
{"id":"Mercury_7222758","answerKey":"B"}
{"id":"Mercury_SC_401305","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_4_25","answerKey":"C"}
{"id":"Mercury_7145583","answerKey":"B"}
{"id":"Mercury_7100748","answerKey":"D"}
{"id":"ACTAAP_2008_5_15","answerKey":"A"}
{"id":"Mercury_185115","answerKey":"B"}
{"id":"VASoL_2009_5_28","answerKey":"C"}
{"id":"Mercury_SC_413085","answerKey":"B"}
{"id":"Mercury_SC_405004","answerKey":"B"}
{"id":"VASoL_2009_5_20","answerKey":"C"}
{"id":"MCAS_2003_8_28","answerKey":"C"}
{"id":"Mercury_7167038","answerKey":"C"}
{"id":"MCAS_1999_4_32","answerKey":"D"}
{"id":"MCAS_2001_5_14","answerKey":"B"}
{"id":"ACTAAP_2012_7_2","answerKey":"D"}
{"id":"Mercury_400704","answerKey":"A"}
{"id":"Mercury_7215478","answerKey":"A"}
{"id":"Mercury_7033600","answerKey":"A"}
{"id":"ACTAAP_2015_7_4","answerKey":"A"}
{"id":"Mercury_SC_LBS10949","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_8_30","answerKey":"4"}
{"id":"Mercury_411782","answerKey":"D"}
{"id":"LEAP_2003_8_10393","answerKey":"A"}
{"id":"ACTAAP_2010_5_15","answerKey":"D"}
{"id":"Mercury_7214235","answerKey":"A"}
{"id":"MCAS_2011_8_17685","answerKey":"D"}
{"id":"Mercury_7165813","answerKey":"B"}
{"id":"Mercury_7071750","answerKey":"D"}
{"id":"Mercury_7120820","answerKey":"C"}
{"id":"Mercury_7004585","answerKey":"D"}
{"id":"MSA_2012_8_34","answerKey":"A"}
{"id":"AKDE&ED_2012_4_48","answerKey":"B"}
{"id":"Mercury_SC_408747","answerKey":"A"}
{"id":"Mercury_7267838","answerKey":"A"}
{"id":"Mercury_SC_411419","answerKey":"C"}
{"id":"NCEOGA_2013_5_16","answerKey":"C"}
{"id":"Mercury_7093100","answerKey":"B"}
{"id":"Mercury_SC_407608","answerKey":"C"}
{"id":"ACTAAP_2013_7_10","answerKey":"C"}
{"id":"Mercury_7271320","answerKey":"A"}
{"id":"Mercury_412714","answerKey":"D"}
{"id":"Mercury_7250268","answerKey":"A"}
{"id":"VASoL_2008_5_33","answerKey":"D"}
{"id":"Mercury_SC_408390","answerKey":"A"}
{"id":"CSZ_2008_5_CSZ10081","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_8_32","answerKey":"3"}
{"id":"Mercury_SC_400679","answerKey":"B"}
{"id":"Mercury_7058520","answerKey":"C"}
{"id":"Mercury_7005075","answerKey":"C"}
{"id":"Mercury_189105","answerKey":"D"}
{"id":"Mercury_7091893","answerKey":"C"}
{"id":"Mercury_LBS10706","answerKey":"D"}
{"id":"Mercury_7012583","answerKey":"C"}
{"id":"NYSEDREGENTS_2008_8_2","answerKey":"2"}
{"id":"MCAS_2003_5_20","answerKey":"A"}
{"id":"Mercury_SC_402077","answerKey":"C"}
{"id":"Mercury_7235935","answerKey":"A"}
{"id":"Mercury_7216773","answerKey":"D"}
{"id":"Mercury_SC_416653","answerKey":"C"}
{"id":"Mercury_7007770","answerKey":"D"}
{"id":"Mercury_7137008","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_4_4","answerKey":"A"}
{"id":"ACTAAP_2008_5_14","answerKey":"A"}
{"id":"TIMSS_2007_8_pg26","answerKey":"C"}
{"id":"Mercury_SC_400529","answerKey":"A"}
{"id":"Mercury_409465","answerKey":"D"}
{"id":"Mercury_7201268","answerKey":"B"}
{"id":"Mercury_408093","answerKey":"A"}
{"id":"MCAS_2014_8_19","answerKey":"D"}
{"id":"Mercury_7214043","answerKey":"C"}
{"id":"LEAP__8_10369","answerKey":"D"}
{"id":"Mercury_7032883","answerKey":"C"}
{"id":"Mercury_7235673","answerKey":"A"}
{"id":"LEAP__4_10224","answerKey":"B"}
{"id":"VASoL_2007_5_12","answerKey":"A"}
{"id":"Mercury_7007648","answerKey":"D"}
{"id":"Mercury_406802","answerKey":"A"}
{"id":"Mercury_SC_407692","answerKey":"B"}
{"id":"Mercury_SC_LBS10516","answerKey":"B"}
{"id":"Mercury_7122973","answerKey":"B"}
{"id":"MCAS_2013_8_29426","answerKey":"D"}
{"id":"Mercury_7274365","answerKey":"B"}
{"id":"Mercury_7234343","answerKey":"C"}
{"id":"Mercury_SC_413242","answerKey":"A"}
{"id":"Mercury_SC_410624","answerKey":"A"}
{"id":"Mercury_7254538","answerKey":"B"}
{"id":"Mercury_182368","answerKey":"A"}
{"id":"Mercury_7005128","answerKey":"D"}
{"id":"NYSEDREGENTS_2008_8_12","answerKey":"2"}
{"id":"Mercury_7016363","answerKey":"C"}
{"id":"TIMSS_2011_8_pg23","answerKey":"D"}
{"id":"MEA_2014_8_18","answerKey":"C"}
{"id":"NCEOGA_2013_5_13","answerKey":"A"}
{"id":"Mercury_7110215","answerKey":"D"}
{"id":"Mercury_7173653","answerKey":"B"}
{"id":"MCAS_2003_8_33","answerKey":"D"}
{"id":"ACTAAP_2007_7_15","answerKey":"B"}
{"id":"Mercury_7179253","answerKey":"C"}
{"id":"Mercury_404991","answerKey":"A"}
{"id":"Mercury_SC_408509","answerKey":"C"}
{"id":"Mercury_7126875","answerKey":"C"}
{"id":"Mercury_7068950","answerKey":"D"}
{"id":"Mercury_7201040","answerKey":"D"}
{"id":"Mercury_SC_400365","answerKey":"D"}
{"id":"Mercury_7172813","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_4_8","answerKey":"C"}
{"id":"Mercury_SC_400843","answerKey":"A"}
{"id":"Mercury_7008593","answerKey":"A"}
{"id":"Mercury_7200568","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_8_6","answerKey":"1"}
{"id":"TIMSS_1995_8_P4","answerKey":"E"}
{"id":"Mercury_SC_408851","answerKey":"C"}
{"id":"CSZ30179","answerKey":"D"}
{"id":"Mercury_SC_400214","answerKey":"A"}
{"id":"Mercury_SC_409669","answerKey":"C"}
{"id":"Mercury_7041213","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_23","answerKey":"A"}
{"id":"Mercury_7269098","answerKey":"D"}
{"id":"Mercury_SC_403011","answerKey":"D"}
{"id":"Mercury_7017903","answerKey":"A"}
{"id":"Mercury_SC_406663","answerKey":"B"}
{"id":"Mercury_SC_400611","answerKey":"D"}
{"id":"NYSEDREGENTS_2013_8_22","answerKey":"1"}
{"id":"NYSEDREGENTS_2012_8_31","answerKey":"2"}
{"id":"MCAS_2000_4_32","answerKey":"C"}
{"id":"TIMSS_1995_8_K15","answerKey":"D"}
{"id":"Mercury_SC_405792","answerKey":"A"}
{"id":"ACTAAP_2007_7_13","answerKey":"B"}
{"id":"Mercury_SC_402116","answerKey":"C"}
{"id":"MSA_2015_5_34","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_8_30","answerKey":"2"}
{"id":"Mercury_7222303","answerKey":"C"}
{"id":"Mercury_7220483","answerKey":"B"}
{"id":"Mercury_7034773","answerKey":"C"}
{"id":"Mercury_7210893","answerKey":"A"}
{"id":"Mercury_187093","answerKey":"B"}
{"id":"Mercury_7271355","answerKey":"B"}
{"id":"Mercury_7033530","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_4_8","answerKey":"D"}
{"id":"Mercury_SC_409026","answerKey":"C"}
{"id":"Mercury_7214253","answerKey":"D"}
{"id":"Mercury_SC_400923","answerKey":"C"}
{"id":"Mercury_SC_405496","answerKey":"A"}
{"id":"MCAS_2000_8_16","answerKey":"B"}
{"id":"Mercury_7090580","answerKey":"B"}
{"id":"Mercury_7044555","answerKey":"D"}
{"id":"Mercury_SC_401789","answerKey":"C"}
{"id":"Mercury_7001873","answerKey":"D"}
{"id":"Mercury_SC_415071","answerKey":"B"}
{"id":"Mercury_SC_408991","answerKey":"D"}
{"id":"TIMSS_2011_8_pg101","answerKey":"B"}
{"id":"Mercury_SC_407441","answerKey":"D"}
{"id":"MCAS_2015_5_11","answerKey":"D"}
{"id":"Mercury_7091875","answerKey":"A"}
{"id":"Mercury_SC_400848","answerKey":"D"}
{"id":"Mercury_SC_400844","answerKey":"B"}
{"id":"Mercury_177730","answerKey":"A"}
{"id":"Mercury_406923","answerKey":"C"}
{"id":"Mercury_SC_LBS10169","answerKey":"A"}
{"id":"Mercury_185500","answerKey":"A"}
{"id":"ACTAAP_2008_7_16","answerKey":"A"}
{"id":"Mercury_SC_401642","answerKey":"A"}
{"id":"LEAP__7_10355","answerKey":"D"}
{"id":"Mercury_SC_400836","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_4_2","answerKey":"B"}
{"id":"Mercury_SC_406885","answerKey":"A"}
{"id":"Mercury_SC_401160","answerKey":"B"}
{"id":"Mercury_7221393","answerKey":"B"}
{"id":"Mercury_7071610","answerKey":"D"}
{"id":"MCAS_2010_8_12006","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_4_13","answerKey":"C"}
{"id":"MCAS_2000_8_20","answerKey":"A"}
{"id":"Mercury_7137445","answerKey":"C"}
{"id":"Mercury_7283693","answerKey":"A"}
{"id":"MEA_2016_5_5","answerKey":"A"}
{"id":"TIMSS_2011_8_pg50","answerKey":"A"}
{"id":"Mercury_7217228","answerKey":"C"}
{"id":"Mercury_SC_401190","answerKey":"A"}
{"id":"Mercury_7210105","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_4_10","answerKey":"B"}
{"id":"Mercury_7122955","answerKey":"C"}
{"id":"MCAS_1998_8_11","answerKey":"B"}
{"id":"Mercury_7092278","answerKey":"C"}
{"id":"Mercury_SC_401658","answerKey":"C"}
{"id":"Mercury_7211558","answerKey":"C"}
{"id":"Mercury_SC_402070","answerKey":"C"}
{"id":"Mercury_7004130","answerKey":"C"}
{"id":"MCAS_2000_8_12","answerKey":"C"}
{"id":"Mercury_7141785","answerKey":"A"}
{"id":"Mercury_SC_406040","answerKey":"D"}
{"id":"Mercury_7115255","answerKey":"B"}
{"id":"VASoL_2008_5_5","answerKey":"C"}
{"id":"Mercury_7040950","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_8_15","answerKey":"1"}
{"id":"Mercury_404900","answerKey":"A"}
{"id":"Mercury_SC_415583","answerKey":"C"}
{"id":"OHAT_2009_5_42","answerKey":"B"}
{"id":"Mercury_400522","answerKey":"D"}
{"id":"Mercury_7116358","answerKey":"B"}
{"id":"Mercury_SC_407796","answerKey":"C"}
{"id":"Mercury_7043978","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_8_13","answerKey":"4"}
{"id":"Mercury_7025025","answerKey":"A"}
{"id":"Mercury_7188825","answerKey":"D"}
{"id":"Mercury_SC_400118","answerKey":"A"}
{"id":"Mercury_SC_410972","answerKey":"C"}
{"id":"Mercury_7015575","answerKey":"B"}
{"id":"Mercury_7184748","answerKey":"D"}
{"id":"VASoL_2010_5_15","answerKey":"C"}
{"id":"NYSEDREGENTS_2012_4_5","answerKey":"D"}
{"id":"Mercury_7108063","answerKey":"B"}
{"id":"Mercury_SC_400609","answerKey":"D"}
{"id":"Mercury_416411","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_8_16","answerKey":"2"}
{"id":"Mercury_7027388","answerKey":"A"}
{"id":"Mercury_7271425","answerKey":"B"}
{"id":"Mercury_7221445","answerKey":"A"}
{"id":"ACTAAP_2010_5_9","answerKey":"A"}
{"id":"Mercury_SC_401781","answerKey":"C"}
{"id":"Mercury_7032690","answerKey":"B"}
{"id":"MCAS_2004_5_2","answerKey":"A"}
{"id":"VASoL_2010_3_20","answerKey":"A"}
{"id":"Mercury_191503","answerKey":"B"}
{"id":"Mercury_7119875","answerKey":"C"}
{"id":"Mercury_7140298","answerKey":"B"}
{"id":"AKDE&ED_2008_8_43","answerKey":"D"}
{"id":"Mercury_7057243","answerKey":"C"}
{"id":"Mercury_404987","answerKey":"B"}
{"id":"CSZ_2004_5_CSZ10100","answerKey":"D"}
{"id":"MCAS_2013_8_29417","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_8_14","answerKey":"3"}
{"id":"NYSEDREGENTS_2008_4_21","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_4_20","answerKey":"C"}
{"id":"Mercury_7041930","answerKey":"D"}
{"id":"Mercury_SC_413009","answerKey":"A"}
{"id":"Mercury_401010","answerKey":"A"}
{"id":"Mercury_416405","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_27","answerKey":"1"}
{"id":"Mercury_183925","answerKey":"D"}
{"id":"Mercury_7004760","answerKey":"D"}
{"id":"Mercury_7075180","answerKey":"A"}
{"id":"Mercury_SC_401283","answerKey":"B"}
{"id":"Mercury_7092453","answerKey":"C"}
{"id":"MCAS_2002_8_14","answerKey":"A"}
{"id":"Mercury_7210385","answerKey":"B"}
{"id":"Mercury_SC_402627","answerKey":"C"}
{"id":"Mercury_7013230","answerKey":"A"}
{"id":"Mercury_178553","answerKey":"D"}
{"id":"Mercury_SC_416097","answerKey":"B"}
{"id":"Mercury_404096","answerKey":"B"}
{"id":"Mercury_SC_401164","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_8_37","answerKey":"2"}
{"id":"Mercury_7144498","answerKey":"A"}
{"id":"Mercury_7010973","answerKey":"C"}
{"id":"ACTAAP_2007_7_2","answerKey":"C"}
{"id":"Mercury_411070","answerKey":"A"}
{"id":"Mercury_7220518","answerKey":"A"}
{"id":"Mercury_SC_408243","answerKey":"D"}
{"id":"Mercury_176610","answerKey":"C"}
{"id":"Mercury_SC_406070","answerKey":"A"}
{"id":"Mercury_SC_408905","answerKey":"A"}
{"id":"MCAS_2000_8_2","answerKey":"C"}
{"id":"NYSEDREGENTS_2013_8_5","answerKey":"3"}
{"id":"Mercury_182928","answerKey":"A"}
{"id":"NYSEDREGENTS_2013_8_41","answerKey":"4"}
{"id":"Mercury_SC_LBS10940","answerKey":"C"}
{"id":"NYSEDREGENTS_2008_8_28","answerKey":"3"}
{"id":"Mercury_SC_405784","answerKey":"B"}
{"id":"Mercury_SC_407155","answerKey":"C"}
{"id":"Mercury_7069510","answerKey":"C"}
{"id":"Mercury_7027563","answerKey":"B"}
{"id":"Mercury_181545","answerKey":"C"}
{"id":"Mercury_410702","answerKey":"A"}
{"id":"Mercury_7171588","answerKey":"A"}
{"id":"Mercury_412298","answerKey":"D"}
{"id":"Mercury_7013843","answerKey":"D"}
{"id":"Mercury_7170643","answerKey":"A"}
{"id":"Mercury_SC_401319","answerKey":"D"}
{"id":"Mercury_7211418","answerKey":"D"}
{"id":"Mercury_SC_405232","answerKey":"C"}
{"id":"Mercury_7141418","answerKey":"B"}
{"id":"Mercury_SC_LBS10064","answerKey":"D"}
{"id":"Mercury_7085435","answerKey":"B"}
{"id":"Mercury_7012688","answerKey":"A"}
{"id":"AIMS_2009_4_13","answerKey":"A"}
{"id":"MCAS_2003_5_8","answerKey":"C"}
{"id":"Mercury_7239453","answerKey":"C"}
{"id":"Mercury_180390","answerKey":"D"}
{"id":"Mercury_SC_407405","answerKey":"D"}
{"id":"MCAS_1998_8_12","answerKey":"B"}
{"id":"AIMS_2008_8_7","answerKey":"A"}
{"id":"Mercury_7217910","answerKey":"D"}
{"id":"Mercury_7029768","answerKey":"C"}
{"id":"NCEOGA_2013_5_6","answerKey":"B"}
{"id":"TIMSS_2003_8_pg48","answerKey":"A"}
{"id":"Mercury_7003623","answerKey":"B"}
{"id":"Mercury_7099260","answerKey":"D"}
{"id":"Mercury_7025095","answerKey":"D"}
{"id":"Mercury_7009748","answerKey":"D"}
{"id":"Mercury_7008418","answerKey":"C"}
{"id":"MDSA_2012_8_1","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_29","answerKey":"D"}
{"id":"MCAS_2000_8_28","answerKey":"D"}
{"id":"Mercury_7056735","answerKey":"C"}
{"id":"Mercury_SC_406840","answerKey":"A"}
{"id":"MCAS_2011_8_17681","answerKey":"B"}
{"id":"Mercury_SC_406703","answerKey":"C"}
{"id":"LEAP_2000_8_1","answerKey":"C"}
{"id":"VASoL_2010_3_6","answerKey":"B"}
{"id":"OHAT_2010_8_24","answerKey":"B"}
{"id":"Mercury_7207183","answerKey":"A"}
{"id":"Mercury_SC_400005","answerKey":"A"}
{"id":"MCAS_2011_8_17692","answerKey":"B"}
{"id":"Mercury_7207113","answerKey":"D"}
{"id":"LEAP_2004_4_10260","answerKey":"B"}
{"id":"AKDE&ED_2012_4_29","answerKey":"A"}
{"id":"Mercury_7085470","answerKey":"B"}
{"id":"NCEOGA_2013_8_3","answerKey":"B"}
{"id":"Mercury_7164920","answerKey":"D"}
{"id":"MCAS_2011_8_17697","answerKey":"B"}
{"id":"Mercury_SC_LBS10939","answerKey":"A"}
{"id":"VASoL_2010_3_11","answerKey":"C"}
{"id":"VASoL_2009_3_21","answerKey":"C"}
{"id":"TIMSS_2003_8_pg33","answerKey":"C"}
{"id":"Mercury_7016415","answerKey":"C"}
{"id":"Mercury_7029820","answerKey":"D"}
{"id":"Mercury_7014403","answerKey":"A"}
{"id":"Mercury_SC_413637","answerKey":"D"}
{"id":"NYSEDREGENTS_2013_4_19","answerKey":"A"}
{"id":"Mercury_415267","answerKey":"D"}
{"id":"Mercury_7136150","answerKey":"C"}
{"id":"Mercury_7199045","answerKey":"A"}
{"id":"Mercury_7267505","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_8_5","answerKey":"3"}
{"id":"NYSEDREGENTS_2013_8_6","answerKey":"1"}
{"id":"Mercury_7175735","answerKey":"C"}
{"id":"Mercury_SC_405026","answerKey":"A"}
{"id":"MCAS_2012_8_23643","answerKey":"B"}
{"id":"Mercury_SC_400031","answerKey":"A"}
{"id":"Mercury_SC_400041","answerKey":"C"}
{"id":"ACTAAP_2009_5_1","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_4_5","answerKey":"B"}
{"id":"Mercury_SC_416110","answerKey":"D"}
{"id":"Mercury_SC_405999","answerKey":"A"}
{"id":"ACTAAP_2010_5_2","answerKey":"D"}
{"id":"Mercury_7008698","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_15","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_8_16","answerKey":"4"}
{"id":"Mercury_412695","answerKey":"A"}
{"id":"Mercury_7003990","answerKey":"D"}
{"id":"Mercury_400203","answerKey":"B"}
{"id":"MCAS_1999_4_7","answerKey":"B"}
{"id":"Mercury_7160738","answerKey":"D"}
{"id":"Mercury_7283710","answerKey":"A"}
{"id":"MDSA_2007_8_60","answerKey":"D"}
{"id":"Mercury_7201320","answerKey":"A"}
{"id":"Mercury_SC_408939","answerKey":"C"}
{"id":"Mercury_7103548","answerKey":"B"}
{"id":"Mercury_7001540","answerKey":"C"}
{"id":"MCAS_2000_8_32","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_8_18","answerKey":"2"}
{"id":"NYSEDREGENTS_2013_4_10","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_8_17","answerKey":"1"}
{"id":"Mercury_7268993","answerKey":"D"}
{"id":"Mercury_7210158","answerKey":"B"}
{"id":"Mercury_SC_LBS10266","answerKey":"C"}
{"id":"Mercury_411744","answerKey":"A"}
{"id":"LEAP_2012_8_10442","answerKey":"C"}
{"id":"MCAS_1999_8_33","answerKey":"B"}
{"id":"ACTAAP_2007_7_34","answerKey":"C"}
{"id":"Mercury_7216983","answerKey":"C"}
{"id":"Mercury_7137078","answerKey":"C"}
{"id":"Mercury_7222145","answerKey":"D"}
{"id":"Mercury_7042543","answerKey":"B"}
{"id":"Mercury_SC_400675","answerKey":"B"}
{"id":"Mercury_LBS10778","answerKey":"A"}
{"id":"Mercury_7027335","answerKey":"C"}
{"id":"Mercury_7012863","answerKey":"A"}
{"id":"Mercury_SC_415395","answerKey":"A"}
{"id":"Mercury_SC_LBS10579","answerKey":"D"}
{"id":"MCAS_2004_5_30","answerKey":"A"}
{"id":"MCAS_1999_4_21","answerKey":"B"}
{"id":"MCAS_2004_8_2","answerKey":"C"}
{"id":"Mercury_SC_407690","answerKey":"C"}
{"id":"Mercury_7148155","answerKey":"B"}
{"id":"MCAS_1999_8_32","answerKey":"C"}
{"id":"Mercury_7032480","answerKey":"A"}
{"id":"Mercury_7041265","answerKey":"A"}
{"id":"Mercury_7001208","answerKey":"C"}
{"id":"Mercury_416643","answerKey":"D"}
{"id":"TAKS_2009_5_1","answerKey":"D"}
{"id":"Mercury_SC_402261","answerKey":"C"}
{"id":"MDSA_2009_8_36","answerKey":"B"}
{"id":"Mercury_7017955","answerKey":"D"}
{"id":"Mercury_178815","answerKey":"C"}
{"id":"Mercury_405875","answerKey":"C"}
{"id":"ACTAAP_2012_7_14","answerKey":"A"}
{"id":"Mercury_7005093","answerKey":"B"}
{"id":"TIMSS_2011_4_pg24","answerKey":"B"}
{"id":"Mercury_7056490","answerKey":"A"}
{"id":"Mercury_SC_401673","answerKey":"A"}
{"id":"Mercury_SC_408556","answerKey":"B"}
{"id":"Mercury_7008908","answerKey":"B"}
{"id":"TIMSS_2003_8_pg42","answerKey":"C"}
{"id":"TAKS_2009_5_18","answerKey":"D"}
{"id":"VASoL_2009_5_8","answerKey":"B"}
{"id":"Mercury_7008103","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_4_28","answerKey":"D"}
{"id":"MEAP_2005_8_31","answerKey":"D"}
{"id":"Mercury_401583","answerKey":"A"}
{"id":"Mercury_177188","answerKey":"A"}
{"id":"Mercury_SC_414356","answerKey":"B"}
{"id":"Mercury_416539","answerKey":"A"}
{"id":"Mercury_SC_LBS10276","answerKey":"C"}
{"id":"Mercury_7092243","answerKey":"C"}
{"id":"Mercury_7085313","answerKey":"A"}
{"id":"VASoL_2009_5_23","answerKey":"B"}
{"id":"Mercury_7083790","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_8_26","answerKey":"4"}
{"id":"Mercury_7171763","answerKey":"C"}
{"id":"Mercury_SC_400001","answerKey":"C"}
{"id":"MDSA_2008_5_23","answerKey":"A"}
{"id":"Mercury_416374","answerKey":"C"}
{"id":"MDSA_2008_8_25","answerKey":"C"}
{"id":"MCAS_2004_5_36","answerKey":"C"}
{"id":"Mercury_SC_400125","answerKey":"A"}
{"id":"TIMSS_1995_8_Q11","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_4_14","answerKey":"C"}
{"id":"Mercury_7245123","answerKey":"B"}
{"id":"MCAS_2000_8_37","answerKey":"A"}
{"id":"TIMSS_1995_8_J8","answerKey":"B"}
{"id":"Mercury_7268765","answerKey":"A"}
{"id":"Mercury_7248290","answerKey":"A"}
{"id":"Mercury_406741","answerKey":"D"}
{"id":"Mercury_SC_401123","answerKey":"B"}
{"id":"MCAS_1998_8_22","answerKey":"A"}
{"id":"Mercury_401187","answerKey":"C"}
{"id":"MCAS_2009_5_6512","answerKey":"C"}
{"id":"Mercury_406779","answerKey":"C"}
{"id":"Mercury_7093083","answerKey":"C"}
{"id":"MSA_2012_8_9","answerKey":"C"}
{"id":"Mercury_403681","answerKey":"C"}
{"id":"Mercury_184380","answerKey":"A"}
{"id":"NCEOGA_2013_5_39","answerKey":"B"}
{"id":"Mercury_SC_409053","answerKey":"B"}
{"id":"Mercury_7017885","answerKey":"A"}
{"id":"Mercury_7246365","answerKey":"C"}
{"id":"Mercury_7239698","answerKey":"C"}
{"id":"Mercury_SC_408857","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_4_22","answerKey":"C"}
{"id":"Mercury_7172865","answerKey":"C"}
{"id":"Mercury_SC_415363","answerKey":"B"}
{"id":"Mercury_7085418","answerKey":"B"}
{"id":"MCAS_8_2015_13","answerKey":"A"}
{"id":"AKDE&ED_2008_4_39","answerKey":"D"}
{"id":"ACTAAP_2010_7_8","answerKey":"D"}
{"id":"Mercury_SC_401648","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_8_11","answerKey":"1"}
{"id":"Mercury_7216615","answerKey":"B"}
{"id":"LEAP_2003_8_10394","answerKey":"D"}
{"id":"Mercury_7216090","answerKey":"B"}
{"id":"Mercury_405452","answerKey":"A"}
{"id":"MCAS_8_2015_8","answerKey":"B"}
{"id":"Mercury_SC_401780","answerKey":"D"}
{"id":"Mercury_7262850","answerKey":"C"}
{"id":"MDSA_2009_4_24","answerKey":"A"}
{"id":"Mercury_7004638","answerKey":"D"}
{"id":"Mercury_7124355","answerKey":"A"}
{"id":"Mercury_SC_416530","answerKey":"C"}
{"id":"MCAS_2000_8_36","answerKey":"B"}
{"id":"MCAS_2010_5_11995","answerKey":"D"}
{"id":"NYSEDREGENTS_2008_4_8","answerKey":"A"}
{"id":"Mercury_7103320","answerKey":"C"}
{"id":"Mercury_7134803","answerKey":"C"}
{"id":"MCAS_2004_9_21","answerKey":"A"}
{"id":"Mercury_SC_409038","answerKey":"A"}
{"id":"ACTAAP_2014_5_15","answerKey":"C"}
{"id":"Mercury_7216860","answerKey":"C"}
{"id":"Mercury_415689","answerKey":"A"}
{"id":"NCEOGA_2013_5_32","answerKey":"B"}
{"id":"Mercury_7082653","answerKey":"B"}
{"id":"MCAS_2006_8_21","answerKey":"A"}
{"id":"Mercury_SC_LBS11012","answerKey":"D"}
{"id":"Mercury_7094465","answerKey":"A"}
{"id":"MCAS_2002_5_7","answerKey":"D"}
{"id":"NYSEDREGENTS_2008_8_14","answerKey":"4"}
{"id":"Mercury_7016695","answerKey":"D"}
{"id":"Mercury_SC_407288","answerKey":"D"}
{"id":"Mercury_7001453","answerKey":"D"}
{"id":"Mercury_404153","answerKey":"D"}
{"id":"MCAS_2005_9_11","answerKey":"C"}
{"id":"Mercury_SC_405200","answerKey":"D"}
{"id":"Mercury_7268783","answerKey":"D"}
{"id":"NYSEDREGENTS_2015_4_18","answerKey":"C"}
{"id":"Mercury_407129","answerKey":"A"}
{"id":"VASoL_2008_3_30","answerKey":"D"}
{"id":"Mercury_190803","answerKey":"B"}
{"id":"ACTAAP_2007_7_35","answerKey":"C"}
{"id":"Mercury_7186130","answerKey":"C"}
{"id":"Mercury_SC_400526","answerKey":"A"}
{"id":"TIMSS_2003_4_pg13","answerKey":"A"}
{"id":"Mercury_7239715","answerKey":"A"}
{"id":"Mercury_SC_LBS10269","answerKey":"D"}
{"id":"Mercury_SC_406793","answerKey":"A"}
{"id":"NCEOGA_2013_5_18","answerKey":"C"}
{"id":"Mercury_SC_416521","answerKey":"D"}
{"id":"Mercury_7026618","answerKey":"A"}
{"id":"Mercury_7228113","answerKey":"B"}
{"id":"Mercury_176558","answerKey":"A"}
{"id":"Mercury_SC_408424","answerKey":"A"}
{"id":"Mercury_7166390","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_4_2","answerKey":"B"}
{"id":"Mercury_SC_406709","answerKey":"D"}
{"id":"Mercury_7218243","answerKey":"C"}
{"id":"Mercury_189980","answerKey":"C"}
{"id":"Mercury_SC_405074","answerKey":"C"}
{"id":"Mercury_406917","answerKey":"B"}
{"id":"MCAS_2004_9_6-v1","answerKey":"C"}
{"id":"Mercury_SC_408578","answerKey":"C"}
{"id":"Mercury_SC_407195","answerKey":"C"}
{"id":"Mercury_SC_402054","answerKey":"D"}
{"id":"Mercury_7175770","answerKey":"D"}
{"id":"Mercury_SC_LBS10267","answerKey":"A"}
{"id":"MCAS_2000_8_9","answerKey":"B"}
{"id":"Mercury_404101","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_25","answerKey":"C"}
{"id":"Mercury_7092365","answerKey":"B"}
{"id":"ACTAAP_2013_5_4","answerKey":"B"}
{"id":"TIMSS_2007_8_pg85","answerKey":"B"}
{"id":"Mercury_SC_415369","answerKey":"A"}
{"id":"Mercury_400332","answerKey":"A"}
{"id":"Mercury_178535","answerKey":"C"}
{"id":"Mercury_7013370","answerKey":"B"}
{"id":"Mercury_7212153","answerKey":"D"}
{"id":"MCAS_2003_8_17","answerKey":"A"}
{"id":"TIMSS_2003_4_pg20","answerKey":"A"}
{"id":"MCAS_2012_8_23654","answerKey":"D"}
{"id":"MSA_2015_5_44","answerKey":"D"}
{"id":"Mercury_405943","answerKey":"B"}
{"id":"MCAS_8_2014_21","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_8_43","answerKey":"2"}
{"id":"Mercury_SC_405197","answerKey":"A"}
{"id":"Mercury_406776","answerKey":"C"}
{"id":"Mercury_7263288","answerKey":"B"}
{"id":"Mercury_7136483","answerKey":"A"}
{"id":"MEA_2013_5_9","answerKey":"A"}
{"id":"Mercury_7043908","answerKey":"A"}
{"id":"Mercury_SC_401815","answerKey":"B"}
{"id":"MEA_2013_5_11","answerKey":"A"}
{"id":"Mercury_7166863","answerKey":"B"}
{"id":"MEA_2010_8_2","answerKey":"D"}
{"id":"LEAP__7_10346","answerKey":"B"}
{"id":"LEAP__7_10349","answerKey":"C"}
{"id":"CSZ_2005_5_CSZ10247","answerKey":"A"}
{"id":"Mercury_7234028","answerKey":"B"}
{"id":"Mercury_7018393","answerKey":"D"}
{"id":"Mercury_7043680","answerKey":"B"}
{"id":"NYSEDREGENTS_2015_4_6","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_8_35","answerKey":"2"}
{"id":"NYSEDREGENTS_2008_4_23","answerKey":"C"}
{"id":"Mercury_7235970","answerKey":"B"}
{"id":"Mercury_SC_413146","answerKey":"A"}
{"id":"Mercury_7017185","answerKey":"A"}
{"id":"Mercury_182700","answerKey":"A"}
{"id":"Mercury_7043085","answerKey":"B"}
{"id":"LEAP__5_10319","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_8_32","answerKey":"1"}
{"id":"Mercury_7216790","answerKey":"B"}
{"id":"Mercury_7084438","answerKey":"B"}
{"id":"Mercury_7213815","answerKey":"C"}
{"id":"MCAS_2000_4_23","answerKey":"A"}
{"id":"NAEP_2005_4_S12+7","answerKey":"A"}
{"id":"Mercury_SC_401163","answerKey":"C"}
{"id":"Mercury_7026898","answerKey":"C"}
{"id":"MDSA_2011_8_39","answerKey":"A"}
{"id":"MCAS_2004_5_16","answerKey":"C"}
{"id":"Mercury_7206010","answerKey":"B"}
{"id":"Mercury_7138425","answerKey":"D"}
{"id":"Mercury_SC_LBS10948","answerKey":"D"}
{"id":"Mercury_LBS10577","answerKey":"B"}
{"id":"Mercury_7223090","answerKey":"D"}
{"id":"Mercury_7014035","answerKey":"C"}
{"id":"MCAS_1999_8_36","answerKey":"C"}
{"id":"LEAP__5_10309","answerKey":"A"}
{"id":"Mercury_7068670","answerKey":"D"}
{"id":"Mercury_7160545","answerKey":"B"}
{"id":"MDSA_2013_8_7","answerKey":"C"}
{"id":"Mercury_7159880","answerKey":"B"}
{"id":"Mercury_7075128","answerKey":"C"}
{"id":"Mercury_7124093","answerKey":"B"}
{"id":"MCAS_2004_9_4","answerKey":"A"}
{"id":"Mercury_7123305","answerKey":"B"}
{"id":"Mercury_SC_416134","answerKey":"D"}
{"id":"Mercury_401646","answerKey":"B"}
{"id":"MCAS_2004_8_5","answerKey":"B"}
{"id":"Mercury_SC_408426","answerKey":"C"}
{"id":"Mercury_7005828","answerKey":"B"}
{"id":"Mercury_7197890","answerKey":"B"}
{"id":"ACTAAP_2009_7_6","answerKey":"B"}
{"id":"Mercury_400752","answerKey":"B"}
{"id":"Mercury_7233345","answerKey":"B"}
{"id":"Mercury_7154228","answerKey":"B"}
{"id":"ACTAAP_2009_5_15","answerKey":"D"}
{"id":"Mercury_SC_LBS10042","answerKey":"B"}
{"id":"Mercury_406785","answerKey":"A"}
{"id":"Mercury_7077648","answerKey":"B"}
{"id":"Mercury_LBS10205","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_8_18","answerKey":"1"}
{"id":"Mercury_SC_LBS10611","answerKey":"C"}
{"id":"Mercury_7185710","answerKey":"A"}
{"id":"Mercury_188948","answerKey":"B"}
{"id":"Mercury_7179865","answerKey":"B"}
{"id":"MCAS_2012_5_3","answerKey":"C"}
{"id":"Mercury_7159583","answerKey":"A"}
{"id":"Mercury_7251003","answerKey":"B"}
{"id":"Mercury_7245158","answerKey":"D"}
{"id":"ACTAAP_2010_5_12","answerKey":"C"}
{"id":"MSA_2015_5_7","answerKey":"B"}
{"id":"Mercury_7077578","answerKey":"B"}
{"id":"ACTAAP_2007_7_9","answerKey":"A"}
{"id":"Mercury_7222898","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_8_41","answerKey":"2"}
{"id":"TIMSS_2003_4_pg34","answerKey":"B"}
{"id":"MEA_2016_8_2","answerKey":"B"}
{"id":"AIMS_2008_4_13","answerKey":"C"}
{"id":"VASoL_2011_5_1","answerKey":"C"}
{"id":"Mercury_7126613","answerKey":"D"}
{"id":"Mercury_7161035","answerKey":"C"}
{"id":"Mercury_7205468","answerKey":"B"}
{"id":"MCAS_2010_8_12020","answerKey":"A"}
{"id":"ACTAAP_2007_7_14","answerKey":"C"}
{"id":"Mercury_SC_400851","answerKey":"D"}
{"id":"Mercury_SC_LBS10344","answerKey":"A"}
{"id":"VASoL_2008_5_40","answerKey":"A"}
{"id":"Mercury_7239435","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_8_16","answerKey":"2"}
{"id":"Mercury_SC_402614","answerKey":"A"}
{"id":"ACTAAP_2010_5_13","answerKey":"A"}
{"id":"Mercury_SC_410877","answerKey":"D"}
{"id":"Mercury_7163240","answerKey":"C"}
{"id":"Mercury_7145950","answerKey":"A"}
{"id":"TIMSS_1995_8_Q16","answerKey":"D"}
{"id":"Mercury_SC_400003","answerKey":"B"}
{"id":"Mercury_SC_401001","answerKey":"A"}
{"id":"NAEP_2009_8_S10+2","answerKey":"B"}
{"id":"Mercury_7235848","answerKey":"D"}
{"id":"Mercury_SC_402645","answerKey":"B"}
{"id":"Mercury_7230125","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_8_25","answerKey":"3"}
{"id":"TAKS_2009_8_38","answerKey":"D"}
{"id":"TIMSS_2011_4_pg42","answerKey":"B"}
{"id":"MCAS_2006_9_17","answerKey":"A"}
{"id":"Mercury_7012950","answerKey":"B"}
{"id":"Mercury_7219783","answerKey":"B"}
{"id":"Mercury_7041230","answerKey":"A"}
{"id":"Mercury_406786","answerKey":"B"}
{"id":"Mercury_7219713","answerKey":"D"}
{"id":"Mercury_401659","answerKey":"A"}
{"id":"Mercury_7171535","answerKey":"D"}
{"id":"Mercury_182403","answerKey":"B"}
{"id":"Mercury_7085453","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_4_19","answerKey":"B"}
{"id":"Mercury_SC_400195","answerKey":"B"}
{"id":"Mercury_192203","answerKey":"B"}
{"id":"Mercury_7222635","answerKey":"C"}
{"id":"Mercury_SC_400307","answerKey":"D"}
{"id":"Mercury_7238910","answerKey":"B"}
{"id":"VASoL_2008_5_34","answerKey":"A"}
{"id":"Mercury_7217980","answerKey":"D"}
{"id":"Mercury_7228533","answerKey":"C"}
{"id":"Mercury_405953","answerKey":"B"}
{"id":"Mercury_SC_400034","answerKey":"A"}
{"id":"MDSA_2011_4_5","answerKey":"A"}
{"id":"Mercury_7092330","answerKey":"B"}
{"id":"Mercury_SC_400604","answerKey":"C"}
{"id":"Mercury_7162575","answerKey":"B"}
{"id":"Mercury_407053","answerKey":"C"}
{"id":"Mercury_416580","answerKey":"C"}
{"id":"MCAS_2007_5_4785","answerKey":"A"}
{"id":"Mercury_SC_400035","answerKey":"C"}
{"id":"Mercury_412696","answerKey":"A"}
{"id":"Mercury_7145215","answerKey":"B"}
{"id":"Mercury_7213308","answerKey":"C"}
{"id":"Mercury_401762","answerKey":"D"}
{"id":"Mercury_405772","answerKey":"A"}
{"id":"Mercury_7100643","answerKey":"D"}
{"id":"Mercury_7212503","answerKey":"D"}
{"id":"Mercury_SC_402254","answerKey":"D"}
{"id":"MDSA_2009_8_38","answerKey":"B"}
{"id":"Mercury_SC_405504","answerKey":"C"}
{"id":"Mercury_7009608","answerKey":"C"}
{"id":"Mercury_7041913","answerKey":"B"}
{"id":"Mercury_417472","answerKey":"B"}
{"id":"Mercury_7248098","answerKey":"D"}
{"id":"MCAS_2005_9_3-v1","answerKey":"D"}
{"id":"Mercury_401337","answerKey":"C"}
{"id":"Mercury_SC_406480","answerKey":"B"}
{"id":"TIMSS_2011_8_pg77","answerKey":"A"}
{"id":"Mercury_7056473","answerKey":"C"}
{"id":"Mercury_7214585","answerKey":"D"}
{"id":"Mercury_SC_LBS10591","answerKey":"A"}
{"id":"Mercury_7015680","answerKey":"C"}
{"id":"LEAP_2002_8_10387","answerKey":"C"}
{"id":"Mercury_SC_416131","answerKey":"C"}
{"id":"TIMSS_2007_8_pg125","answerKey":"B"}
{"id":"Mercury_7016013","answerKey":"C"}
{"id":"Mercury_7283465","answerKey":"A"}
{"id":"MCAS_2005_8_21","answerKey":"B"}
{"id":"ACTAAP_2013_7_14","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_4_21","answerKey":"C"}
{"id":"Mercury_7270043","answerKey":"A"}
{"id":"Mercury_SC_400593","answerKey":"A"}
{"id":"Mercury_175280","answerKey":"A"}
{"id":"Mercury_SC_400017","answerKey":"D"}
{"id":"TIMSS_2003_8_pg44","answerKey":"D"}
{"id":"Mercury_SC_401128","answerKey":"C"}
{"id":"MEA_2011_8_15","answerKey":"D"}
{"id":"Mercury_7009835","answerKey":"A"}
{"id":"MDSA_2009_8_37","answerKey":"C"}
{"id":"Mercury_7008680","answerKey":"C"}
{"id":"CSZ_2004_5_CSZ20156","answerKey":"D"}
{"id":"Mercury_7182158","answerKey":"D"}
{"id":"Mercury_7163415","answerKey":"D"}
{"id":"Mercury_SC_416171","answerKey":"B"}
{"id":"Mercury_SC_415335","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_4_14","answerKey":"B"}
{"id":"Mercury_7217613","answerKey":"C"}
{"id":"Mercury_7084088","answerKey":"B"}
{"id":"Mercury_7217683","answerKey":"D"}
{"id":"Mercury_7064015","answerKey":"D"}
{"id":"Mercury_7034895","answerKey":"D"}
{"id":"Mercury_SC_LBS10588","answerKey":"C"}
{"id":"NYSEDREGENTS_2012_8_40","answerKey":"4"}
{"id":"Mercury_SC_406718","answerKey":"B"}
{"id":"Mercury_7246313","answerKey":"C"}
{"id":"VASoL_2009_3_32","answerKey":"D"}
{"id":"Mercury_7221025","answerKey":"A"}
{"id":"Mercury_SC_407450","answerKey":"C"}
{"id":"Mercury_SC_LBS10620","answerKey":"A"}
{"id":"Mercury_SC_400132","answerKey":"D"}
{"id":"Mercury_401396","answerKey":"B"}
{"id":"Mercury_7098053","answerKey":"A"}
{"id":"Mercury_415270","answerKey":"C"}
{"id":"Mercury_7213763","answerKey":"B"}
{"id":"NAEP_2005_8_S11+3","answerKey":"D"}
{"id":"NYSEDREGENTS_2013_8_26","answerKey":"1"}
{"id":"Mercury_SC_409576","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_4_21","answerKey":"A"}
{"id":"Mercury_SC_401613","answerKey":"D"}
{"id":"Mercury_404892","answerKey":"C"}
{"id":"Mercury_7094605","answerKey":"A"}
{"id":"Mercury_SC_408782","answerKey":"C"}
{"id":"Mercury_SC_413532","answerKey":"A"}
{"id":"VASoL_2010_3_28","answerKey":"D"}
{"id":"Mercury_7216580","answerKey":"C"}
{"id":"Mercury_7227920","answerKey":"D"}
{"id":"Mercury_416502","answerKey":"D"}
{"id":"Mercury_7234220","answerKey":"C"}
{"id":"Mercury_SC_414130","answerKey":"B"}
{"id":"Mercury_7234273","answerKey":"C"}
{"id":"MCAS_1998_4_24","answerKey":"D"}
{"id":"AKDE&ED_2008_8_5","answerKey":"D"}
{"id":"Mercury_SC_400126","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_4_26","answerKey":"C"}
{"id":"VASoL_2007_5_17","answerKey":"D"}
{"id":"MCAS_2005_5_14","answerKey":"D"}
{"id":"Mercury_7247100","answerKey":"C"}
{"id":"ACTAAP_2010_7_16","answerKey":"D"}
{"id":"Mercury_SC_405935","answerKey":"B"}
{"id":"NYSEDREGENTS_2008_8_4","answerKey":"3"}
{"id":"NYSEDREGENTS_2012_4_4","answerKey":"B"}
{"id":"MCAS_2000_4_25","answerKey":"C"}
{"id":"Mercury_7252298","answerKey":"D"}
{"id":"MEA_2010_8_19","answerKey":"B"}
{"id":"MCAS_2006_9_36","answerKey":"B"}
{"id":"NYSEDREGENTS_2010_8_32","answerKey":"1"}
{"id":"MCAS_2006_9_28","answerKey":"C"}
{"id":"Mercury_7128240","answerKey":"B"}
{"id":"Mercury_7033635","answerKey":"C"}
{"id":"Mercury_405771","answerKey":"C"}
{"id":"Mercury_SC_408629","answerKey":"C"}
{"id":"Mercury_400806","answerKey":"B"}
{"id":"Mercury_7207148","answerKey":"D"}
{"id":"Mercury_7205328","answerKey":"C"}
{"id":"Mercury_7071663","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_4_3","answerKey":"D"}
{"id":"Mercury_416686","answerKey":"B"}
{"id":"Mercury_7011235","answerKey":"B"}
{"id":"Mercury_7090668","answerKey":"C"}
{"id":"Mercury_7001628","answerKey":"C"}
{"id":"Mercury_407263","answerKey":"C"}
{"id":"MEA_2013_5_7","answerKey":"A"}
{"id":"TIMSS_2007_4_pg70","answerKey":"D"}
{"id":"Mercury_7165043","answerKey":"B"}
{"id":"Mercury_7057768","answerKey":"A"}
{"id":"Mercury_7227903","answerKey":"B"}
{"id":"ACTAAP_2014_5_5","answerKey":"D"}
{"id":"Mercury_7026933","answerKey":"D"}
{"id":"Mercury_SC_402284","answerKey":"B"}
{"id":"Mercury_7183103","answerKey":"D"}
{"id":"TIMSS_2007_8_pg63","answerKey":"A"}
{"id":"Mercury_7094115","answerKey":"C"}
{"id":"Mercury_SC_401221","answerKey":"D"}
{"id":"Mercury_SC_401300","answerKey":"B"}
{"id":"Mercury_7264040","answerKey":"B"}
{"id":"Mercury_SC_409014","answerKey":"B"}
{"id":"Mercury_SC_408554","answerKey":"C"}
{"id":"Mercury_7267908","answerKey":"A"}
{"id":"Mercury_7137270","answerKey":"A"}
{"id":"Mercury_SC_401803","answerKey":"D"}
{"id":"LEAP_2004_8_10398","answerKey":"C"}
{"id":"Mercury_7271723","answerKey":"B"}
{"id":"Mercury_SC_406475","answerKey":"A"}
{"id":"Mercury_7165795","answerKey":"A"}
{"id":"Mercury_7185185","answerKey":"B"}
{"id":"ACTAAP_2014_5_6","answerKey":"A"}
{"id":"MCAS_2003_5_28","answerKey":"A"}
{"id":"Mercury_SC_407701","answerKey":"B"}
{"id":"Mercury_401215","answerKey":"C"}
{"id":"MEAP_2005_8_15","answerKey":"A"}
{"id":"MCAS_2003_8_12","answerKey":"C"}
{"id":"Mercury_SC_401257","answerKey":"A"}
{"id":"NYSEDREGENTS_2008_8_7","answerKey":"1"}
{"id":"MSA_2012_5_1","answerKey":"B"}
{"id":"Mercury_407767","answerKey":"D"}
{"id":"Mercury_7077525","answerKey":"C"}
{"id":"MCAS_2002_8_8","answerKey":"D"}
{"id":"NAEP_2000_8_S21+8","answerKey":"A"}
{"id":"MCAS_2009_5_6511","answerKey":"D"}
{"id":"MCAS_2000_8_34","answerKey":"C"}
{"id":"ACTAAP_2007_7_30","answerKey":"A"}
{"id":"Mercury_SC_406000","answerKey":"D"}
{"id":"Mercury_SC_415699","answerKey":"B"}
{"id":"Mercury_SC_402125","answerKey":"A"}
{"id":"Mercury_7082478","answerKey":"B"}
{"id":"Mercury_7220955","answerKey":"D"}
{"id":"VASoL_2007_5_10","answerKey":"B"}
{"id":"Mercury_7217875","answerKey":"C"}
{"id":"TIMSS_2003_8_pg18","answerKey":"C"}
{"id":"VASoL_2009_3_29","answerKey":"C"}
{"id":"Mercury_179218","answerKey":"D"}
{"id":"Mercury_SC_400192","answerKey":"B"}
{"id":"MSA_2012_5_34","answerKey":"D"}
{"id":"Mercury_7221463","answerKey":"D"}
{"id":"Mercury_7001855","answerKey":"B"}
{"id":"Mercury_7083563","answerKey":"B"}
{"id":"Mercury_7213483","answerKey":"D"}
{"id":"Mercury_7234115","answerKey":"A"}
{"id":"Mercury_7221218","answerKey":"B"}
{"id":"Mercury_7004008","answerKey":"C"}
{"id":"TIMSS_2007_4_pg82","answerKey":"B"}
{"id":"Mercury_400715","answerKey":"C"}
{"id":"Mercury_7018533","answerKey":"A"}
{"id":"ACTAAP_2007_7_33","answerKey":"D"}
{"id":"Mercury_7041528","answerKey":"B"}
{"id":"MCAS_2002_8_12","answerKey":"C"}
{"id":"Mercury_SC_415455","answerKey":"C"}
{"id":"MCAS_2006_8_10","answerKey":"D"}
{"id":"LEAP__5_10317","answerKey":"C"}
{"id":"Mercury_7041563","answerKey":"B"}
{"id":"Mercury_417464","answerKey":"C"}
{"id":"MCAS_2004_9_7","answerKey":"C"}
{"id":"Mercury_7143080","answerKey":"B"}
{"id":"TIMSS_2003_8_pg122","answerKey":"C"}
{"id":"TIMSS_2011_8_pg66","answerKey":"A"}
{"id":"Mercury_7201758","answerKey":"C"}
{"id":"MDSA_2012_8_35","answerKey":"D"}
{"id":"MCAS_2005_9_16","answerKey":"C"}
{"id":"Mercury_7267855","answerKey":"C"}
{"id":"Mercury_7037363","answerKey":"B"}
{"id":"Mercury_406955","answerKey":"A"}
{"id":"TIMSS_2003_8_pg19","answerKey":"B"}
{"id":"NYSEDREGENTS_2012_8_12","answerKey":"1"}
{"id":"MCAS_2013_8_29434","answerKey":"B"}
{"id":"VASoL_2009_3_7","answerKey":"A"}
{"id":"Mercury_SC_407444","answerKey":"B"}
{"id":"Mercury_7001348","answerKey":"A"}
{"id":"Mercury_417569","answerKey":"B"}
{"id":"Mercury_7115098","answerKey":"A"}
{"id":"Mercury_7094815","answerKey":"B"}
{"id":"Mercury_7236058","answerKey":"B"}
{"id":"Mercury_7162488","answerKey":"D"}
{"id":"Mercury_SC_401602","answerKey":"A"}
{"id":"MCAS_2010_8_12013","answerKey":"B"}
{"id":"Mercury_SC_415478","answerKey":"D"}
{"id":"Mercury_179113","answerKey":"C"}
{"id":"Mercury_SC_415001","answerKey":"B"}
{"id":"Mercury_LBS10254","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_27","answerKey":"A"}
{"id":"Mercury_7142958","answerKey":"C"}
{"id":"NYSEDREGENTS_2012_4_6","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_4_3","answerKey":"D"}
{"id":"Mercury_SC_408703","answerKey":"D"}
{"id":"Mercury_7128188","answerKey":"B"}
{"id":"Mercury_SC_400177","answerKey":"A"}
{"id":"MEA_2014_8_7","answerKey":"C"}
{"id":"Mercury_7037380","answerKey":"B"}
{"id":"Mercury_7217718","answerKey":"C"}
{"id":"Mercury_7110303","answerKey":"A"}
{"id":"Mercury_7122850","answerKey":"C"}
{"id":"LEAP_2009_8_10429","answerKey":"A"}
{"id":"MCAS_2004_8_25","answerKey":"A"}
{"id":"MCAS_2004_5_25","answerKey":"C"}
{"id":"MCAS_1999_8_10","answerKey":"C"}
{"id":"Mercury_7075110","answerKey":"C"}
{"id":"TIMSS_2003_4_pg21","answerKey":"C"}
{"id":"NCEOGA_2013_8_16","answerKey":"A"}
{"id":"NCEOGA_2013_8_48","answerKey":"A"}
{"id":"VASoL_2009_5_18","answerKey":"A"}
{"id":"Mercury_416468","answerKey":"B"}
{"id":"Mercury_7032848","answerKey":"C"}
{"id":"NYSEDREGENTS_2015_8_22","answerKey":"1"}
{"id":"Mercury_SC_402112","answerKey":"C"}
{"id":"Mercury_412701","answerKey":"B"}
{"id":"Mercury_7092208","answerKey":"A"}
{"id":"ACTAAP_2011_5_6","answerKey":"C"}
{"id":"VASoL_2007_3_30","answerKey":"B"}
{"id":"Mercury_SC_401288","answerKey":"A"}
{"id":"Mercury_SC_400702","answerKey":"B"}
{"id":"Mercury_7182963","answerKey":"C"}
{"id":"Mercury_SC_415773","answerKey":"A"}
{"id":"NYSEDREGENTS_2012_8_14","answerKey":"1"}
{"id":"MEA_2011_8_16","answerKey":"B"}
{"id":"Mercury_SC_402075","answerKey":"D"}
{"id":"Mercury_7270533","answerKey":"A"}
{"id":"Mercury_402091","answerKey":"C"}
{"id":"Mercury_417470","answerKey":"B"}
{"id":"Mercury_7200060","answerKey":"D"}
{"id":"Mercury_SC_416107","answerKey":"A"}
{"id":"MCAS_2004_9_5-v1","answerKey":"D"}
{"id":"MCAS_2000_4_31","answerKey":"B"}
{"id":"ACTAAP_2013_5_13","answerKey":"D"}
{"id":"Mercury_7268205","answerKey":"A"}
{"id":"NYSEDREGENTS_2010_4_20","answerKey":"D"}
{"id":"Mercury_7270130","answerKey":"A"}
{"id":"TAKS_2009_8_11","answerKey":"A"}
{"id":"MCAS_2010_5_11988","answerKey":"A"}
{"id":"Mercury_416504","answerKey":"C"}
{"id":"Mercury_SC_402082","answerKey":"B"}
{"id":"Mercury_7223265","answerKey":"D"}
{"id":"MEA_2014_5_8","answerKey":"A"}
{"id":"Mercury_400396","answerKey":"A"}
{"id":"LEAP_2012_8_10440","answerKey":"B"}
{"id":"TAKS_2009_5_28","answerKey":"C"}
{"id":"OHAT_2009_5_1","answerKey":"C"}
{"id":"MCAS_2000_8_3","answerKey":"D"}
{"id":"VASoL_2011_5_38","answerKey":"C"}
{"id":"Mercury_7038465","answerKey":"C"}
{"id":"NYSEDREGENTS_2008_8_29","answerKey":"4"}
{"id":"Mercury_7013825","answerKey":"C"}
{"id":"Mercury_7274313","answerKey":"C"}
{"id":"Mercury_7098543","answerKey":"B"}
{"id":"Mercury_SC_400854","answerKey":"D"}
{"id":"MCAS_2004_5_21","answerKey":"A"}
{"id":"Mercury_SC_403014","answerKey":"C"}
{"id":"Mercury_7205923","answerKey":"A"}
{"id":"AKDE&ED_2012_8_4","answerKey":"C"}
{"id":"Mercury_7228200","answerKey":"C"}
{"id":"MCAS_2005_5_22","answerKey":"B"}
{"id":"Mercury_SC_400186","answerKey":"D"}
{"id":"MDSA_2007_8_25","answerKey":"C"}
{"id":"Mercury_184240","answerKey":"B"}
{"id":"MCAS_2012_8_23644","answerKey":"C"}
{"id":"Mercury_7109428","answerKey":"A"}
{"id":"MCAS_2011_8_17698","answerKey":"D"}
{"id":"Mercury_SC_407164","answerKey":"D"}
{"id":"Mercury_7038500","answerKey":"D"}
{"id":"Mercury_7080605","answerKey":"C"}
{"id":"Mercury_417568","answerKey":"C"}
{"id":"Mercury_7214620","answerKey":"C"}
{"id":"Mercury_7176103","answerKey":"A"}
{"id":"Mercury_SC_415767","answerKey":"D"}
{"id":"Mercury_7034825","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_17","answerKey":"4"}
{"id":"Mercury_405140","answerKey":"C"}
{"id":"TIMSS_1995_8_M10","answerKey":"D"}
{"id":"VASoL_2009_5_12","answerKey":"C"}
{"id":"Mercury_7163328","answerKey":"A"}
{"id":"Mercury_190190","answerKey":"D"}
{"id":"Mercury_400099","answerKey":"C"}
{"id":"VASoL_2007_3_18","answerKey":"C"}
{"id":"Mercury_7004498","answerKey":"C"}
{"id":"Mercury_400608","answerKey":"B"}
{"id":"VASoL_2009_5_17","answerKey":"C"}
{"id":"Mercury_SC_413302","answerKey":"A"}
{"id":"NYSEDREGENTS_2015_4_27","answerKey":"B"}
{"id":"Mercury_7212048","answerKey":"A"}
{"id":"MCAS_2002_8_7","answerKey":"A"}
{"id":"Mercury_412148","answerKey":"A"}
{"id":"NCEOGA_2013_5_52","answerKey":"B"}
{"id":"Mercury_SC_400164","answerKey":"D"}
{"id":"NYSEDREGENTS_2012_4_11","answerKey":"D"}
{"id":"Mercury_SC_406626","answerKey":"C"}
{"id":"Mercury_SC_409143","answerKey":"B"}
{"id":"Mercury_7006108","answerKey":"D"}
{"id":"TIMSS_2007_8_pg77","answerKey":"D"}
{"id":"Mercury_7082075","answerKey":"C"}
{"id":"Mercury_7068985","answerKey":"C"}
{"id":"NYSEDREGENTS_2010_8_39","answerKey":"3"}
{"id":"Mercury_402286","answerKey":"A"}
{"id":"Mercury_SC_407170","answerKey":"D"}
{"id":"NYSEDREGENTS_2010_4_17","answerKey":"A"}
{"id":"Mercury_SC_401276","answerKey":"A"}
{"id":"Mercury_7219520","answerKey":"C"}
{"id":"Mercury_7008820","answerKey":"C"}
{"id":"Mercury_SC_416175","answerKey":"D"}
{"id":"Mercury_7041545","answerKey":"B"}
{"id":"Mercury_7007438","answerKey":"D"}
{"id":"Mercury_7007788","answerKey":"B"}
{"id":"Mercury_SC_401787","answerKey":"B"}
{"id":"Mercury_7165200","answerKey":"D"}
{"id":"Mercury_416355","answerKey":"A"}
{"id":"Mercury_7252403","answerKey":"A"}
{"id":"Mercury_7271215","answerKey":"B"}
{"id":"TIMSS_1995_8_I14","answerKey":"D"}
{"id":"Mercury_SC_407065","answerKey":"A"}
{"id":"Mercury_402560","answerKey":"A"}
{"id":"Mercury_SC_404974","answerKey":"D"}
{"id":"Mercury_7001715","answerKey":"D"}
{"id":"AIMS_2009_8_26","answerKey":"D"}
{"id":"CSZ30499","answerKey":"B"}
{"id":"Mercury_416369","answerKey":"B"}
{"id":"Mercury_400758","answerKey":"C"}
{"id":"Mercury_7264163","answerKey":"D"}
{"id":"Mercury_7032148","answerKey":"A"}
{"id":"Mercury_7187250","answerKey":"C"}
{"id":"Mercury_SC_416164","answerKey":"A"}
{"id":"Mercury_416378","answerKey":"B"}
{"id":"Mercury_401603","answerKey":"B"}
{"id":"Mercury_SC_408709","answerKey":"D"}
{"id":"MCAS_2005_8_27","answerKey":"A"}
{"id":"Mercury_7009713","answerKey":"A"}
{"id":"MCAS_2010_5_13","answerKey":"B"}
{"id":"Mercury_7248255","answerKey":"A"}
{"id":"Mercury_7185255","answerKey":"C"}
{"id":"Mercury_SC_406653","answerKey":"B"}
{"id":"Mercury_7193690","answerKey":"A"}
{"id":"Mercury_SC_405495","answerKey":"A"}
{"id":"MCAS_1999_4_25","answerKey":"B"}
{"id":"ACTAAP_2009_7_12","answerKey":"A"}
{"id":"Mercury_7083458","answerKey":"B"}
{"id":"Mercury_7030450","answerKey":"C"}
{"id":"TIMSS_2003_8_pg101","answerKey":"C"}
{"id":"Mercury_7057960","answerKey":"C"}
{"id":"VASoL_2009_5_34","answerKey":"A"}
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/data-easy/question-answers.jsonl/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/data-easy/question-answers.jsonl",
"repo_id": "ContextualSP",
"token_count": 41514
} | 246 |
from typing import Dict, NamedTuple, Iterable
from evaluation.metric import Metric
class EvaluationAverages(NamedTuple):
inputs: float
outputs: float
conversions: float
moves: float
overall: float
class Evaluation:
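    """Macro-average per-question scores into overall precision/recall Metrics."""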
def __init__(self, scores: Dict[int, "QuestionScores"]) -> None: # type: ignore
precision = Evaluation._precision(scores.values())
recall = Evaluation._recall(scores.values())
self.inputs = Metric(precision=precision.inputs, recall=recall.inputs)
self.outputs = Metric(precision=precision.outputs, recall=recall.outputs)
self.conversions = Metric(precision=precision.conversions, recall=recall.conversions)
self.moves = Metric(precision=precision.moves, recall=recall.moves)
self.overall = Metric(precision=precision.overall, recall=recall.overall)
@staticmethod
def _precision(scores: Iterable["QuestionScores"]) -> EvaluationAverages: # type: ignore
inputs = 0.0
outputs = 0.0
conversions = 0.0
moves = 0.0
num_processes = 0
for score in scores:
inputs += score.inputs.precision
outputs += score.outputs.precision
conversions += score.conversions.precision
moves += score.moves.precision
num_processes += 1
inputs_avg = round(inputs / num_processes, 3)
outputs_avg = round(outputs / num_processes, 3)
conversions_avg = round(conversions / num_processes, 3)
moves_avg = round(moves / num_processes, 3)
overall = (inputs_avg + outputs_avg + conversions_avg + moves_avg) / 4
return EvaluationAverages(
inputs=inputs_avg,
outputs=outputs_avg,
conversions=conversions_avg,
moves=moves_avg,
overall=overall,
)
@staticmethod
def _recall(scores: Iterable["QuestionScores"]) -> EvaluationAverages: # type: ignore
inputs = 0.0
outputs = 0.0
conversions = 0.0
moves = 0.0
num_processes = 0
for score in scores:
inputs += score.inputs.recall
outputs += score.outputs.recall
conversions += score.conversions.recall
moves += score.moves.recall
num_processes += 1
inputs_avg = round(inputs / num_processes, 3)
outputs_avg = round(outputs / num_processes, 3)
conversions_avg = round(conversions / num_processes, 3)
moves_avg = round(moves / num_processes, 3)
overall = (inputs_avg + outputs_avg + conversions_avg + moves_avg) / 4
return EvaluationAverages(
inputs=inputs_avg,
outputs=outputs_avg,
conversions=conversions_avg,
moves=moves_avg,
overall=overall,
)
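# Hedged usage sketch (illustrative, not part of the original module):
# `QuestionScores` is assumed to expose `.inputs/.outputs/.conversions/.moves`,
# each carrying `.precision` and `.recall` floats; SimpleNamespace stands in here.
if __name__ == "__main__":
    from types import SimpleNamespace
    fake_metric = SimpleNamespace(precision=1.0, recall=0.5)
    fake_score = SimpleNamespace(inputs=fake_metric, outputs=fake_metric,
                                 conversions=fake_metric, moves=fake_metric)
    evaluation = Evaluation({1: fake_score, 2: fake_score})
    # Expected: 1.0 0.5 (assuming Metric keeps its kwargs as attributes).
    print(evaluation.overall.precision, evaluation.overall.recall)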
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/evaluation/evaluation.py/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/evaluation/evaluation.py",
"repo_id": "ContextualSP",
"token_count": 1254
} | 247 |
#!/bin/bash
set -e
export PYTHONPATH=.
echo
echo ----------------------------------
echo unit tests
echo ----------------------------------
echo
set -x
pytest
set +x
echo
echo ----------------------------------
echo mypy
echo ----------------------------------
echo
set -x
mypy $(find . -type f -name '*.py')
echo "Hurray, mypy didn't find problems with the code."
set +x
echo
echo ----------------------------------
echo testfiles-1
echo ----------------------------------
echo
set -x
python3 evaluator.py -p testfiles-1/predictions.tsv -a testfiles-1/answers.tsv -o /tmp/metrics.json
if [ "$(cat /tmp/metrics.json)" != '{"precision": 0.743, "recall": 0.43, "f1": 0.545}' ]; then
echo File /tmp/metrics.json looks wrong.
exit 1
fi
echo File /tmp/metrics.json looks okay.
set +x
echo
echo ----------------------------------
echo testfiles-2
echo ----------------------------------
echo
set -x
python3 evaluator.py -p testfiles-2/predictions.tsv -a testfiles-2/answers.tsv -o /tmp/metrics.json
if [ "$(cat /tmp/metrics.json)" != '{"precision": 1.0, "recall": 1.0, "f1": 1.0}' ]; then
echo File /tmp/metrics.json looks wrong.
exit 1
fi
echo File /tmp/metrics.json looks okay.
set +x
echo
echo ----------------------------------
echo testfiles-3
echo ----------------------------------
echo
set -x
python3 evaluator.py -p testfiles-3/predictions.tsv -a testfiles-3/answers.tsv -o /tmp/metrics.json
if [ "$(cat /tmp/metrics.json)" != '{"precision": 0.833, "recall": 0.583, "f1": 0.686}' ]; then
echo File /tmp/metrics.json looks wrong.
exit 1
fi
echo File /tmp/metrics.json looks okay.
set +x
echo
echo ----------------------------------
echo testfiles-4
echo ----------------------------------
echo
set -x
set +e
python3 evaluator.py -p testfiles-4/predictions.tsv -a testfiles-4/answers.tsv -o /tmp/metrics.json
exit_status=$?
if [ $exit_status -eq 2 ]; then
echo "Got expected exit status: $exit_status"
else
echo "Got unexpected exit status: $exit_status"
exit 1
fi
set -e
set +x
echo
echo ----------------------------------
echo testfiles-5
echo ----------------------------------
echo
set -x
set +e
python3 evaluator.py -p testfiles-5/predictions.tsv -a testfiles-5/answers.tsv -o /tmp/metrics.json
exit_status=$?
if [ $exit_status -eq 2 ]; then
echo "Got expected exit status: $exit_status"
else
echo "Got unexpected exit status: $exit_status"
exit 1
fi
set -e
set +x
echo
echo ----------------------------------
echo testfiles-6
echo ----------------------------------
echo
set -x
set +e
python3 evaluator.py -p testfiles-6/predictions.tsv -a testfiles-6/answers.tsv -o /tmp/metrics.json
exit_status=$?
if [ $exit_status -eq 2 ]; then
echo "Got expected exit status: $exit_status"
else
echo "Got unexpected exit status: $exit_status"
exit 1
fi
set -e
set +x
echo
echo ----------------------------------
echo testfiles-7
echo ----------------------------------
echo
set -x
python3 evaluator.py -p testfiles-7/predictions.tsv -a testfiles-7/answers.tsv -o /tmp/metrics.json
if [ "$(cat /tmp/metrics.json)" != '{"precision": 0.617, "recall": 0.448, "f1": 0.519}' ]; then
echo File /tmp/metrics.json looks wrong.
exit 1
fi
echo File /tmp/metrics.json looks okay.
set +x
echo
echo ----------------------------------
echo Docker
echo ----------------------------------
echo
set -x
./test-in-docker.sh
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/test.sh/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/test.sh",
"repo_id": "ContextualSP",
"token_count": 1183
} | 248 |
The file [answers.jsonl](answers.jsonl) are the answers against which predictions are evaluated on the [SciTail Leaderboard](https://leaderboard.allenai.org/).
The file [dummy-predictions.csv](dummy-predictions.csv) is a valid example prediction file that can be submitted to the [SciTail Leaderboard](https://leaderboard.allenai.org/).
It predicts entailment for every pair of sentences and scores about 40% correct.
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/data/test/README.md/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/data/test/README.md",
"repo_id": "ContextualSP",
"token_count": 123
} | 249 |
import json
import sys
def evaluate(answer_file, prediction_file):
answer_by_id = {}
for line in open(answer_file).readlines():
struct = json.loads(line)
answer_by_id[struct["id"]] = struct
prediction_by_id = {}
for line in open(prediction_file).readlines():
struct = json.loads(line)
prediction_by_id[struct["id"]] = struct
answer_count = len(answer_by_id)
prediction_count = len(prediction_by_id)
if answer_count != prediction_count:
print(
f"Prediction count ({prediction_count}) doesn't match answer count ({answer_count})"
)
sys.exit(1)
total = 0
correct = 0
total_start = 0
correct_start = 0
total_end = 0
correct_end = 0
story_prediction_map = {}
for answer in answer_by_id.values():
answer_id = answer["id"]
prediction = prediction_by_id.get(answer_id, None)
if not prediction:
print(f"Prediction for id {answer_id} missing")
sys.exit(1)
hypothesis = answer["query"]
story = answer["story"]
answer_label = answer["label"]
prediction_label = prediction["label"]
if story not in story_prediction_map:
story_prediction_map[story] = []
total += 1
if answer_label == prediction_label:
correct += 1
story_prediction_map[story].append(True)
else:
story_prediction_map[story].append(False)
if "starts before" in hypothesis or "starts after" in hypothesis:
total_start += 1
if answer_label == prediction_label:
correct_start += 1
else:
total_end += 1
if answer_label == prediction_label:
correct_end += 1
s_total = 0
s_correct = 0
for key in story_prediction_map:
s_total += 1
        if all(story_prediction_map[key]):
            s_correct += 1
total_acc = float(correct) / float(total)
start_acc = float(correct_start) / float(total_start)
end_acc = float(correct_end) / float(total_end)
story_em = float(s_correct) / float(s_total)
return total_acc, start_acc, end_acc, story_em
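# Hedged input sketch (field names taken from the reads above; values are
# illustrative): every line of the answers file is assumed to be JSON like
#   {"id": "q1", "story": "story-1",
#    "query": "event A starts before event B", "label": "positive"}
# and each prediction line only needs a matching {"id": ..., "label": ...}.
# Queries containing "starts before"/"starts after" feed the start-accuracy
# bucket; everything else feeds the end-accuracy bucket.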
def main():
import argparse
parser = argparse.ArgumentParser(
description="Evaluate leaderboard predictions for questions."
)
parser.add_argument(
"--question_answers",
"-qa",
help="Filename of the question answers to read.",
required=True,
)
parser.add_argument(
"--predictions",
"-p",
help="Filename of the leaderboard predictions",
required=True,
)
parser.add_argument(
"--output", "-o", help="Output results to this file.", required=True
)
args = parser.parse_args()
total_acc, start_acc, end_acc, story_em = evaluate(
args.question_answers, args.predictions
)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(
json.dumps(
{
"total_acc": total_acc,
"start_acc": start_acc,
"end_acc": end_acc,
"story_em": story_em,
}
)
)
if __name__ == "__main__":
main()
| ContextualSP/lemon/propara_evaluator/aristo-leaderboard/tracie/evaluator/evaluator.py/0 | {
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/tracie/evaluator/evaluator.py",
"repo_id": "ContextualSP",
"token_count": 1550
} | 250 |
if [ -d "./bookcorpus_premise" ]
then
rm -r ./bookcorpus_premise
fi
mkdir ./bookcorpus_premise
# Launch 36 parallel shards of 500 books each, covering [0, 18000).
for start in $(seq 0 500 17500); do
    python corpus_construction.py --start ${start} --end $((start + 500)) --indicator_type premise &
done
wait
cat ./bookcorpus_premise/*.jsonl > ./premise.jsonl
rm -r ./bookcorpus_premise
| ContextualSP/logigan/corpus_construction/mlm_corpus/construct_premise.sh/0 | {
"file_path": "ContextualSP/logigan/corpus_construction/mlm_corpus/construct_premise.sh",
"repo_id": "ContextualSP",
"token_count": 884
} | 251 |
# Usages:
#
# to install matchzoo dependencies:
# $ make init
#
# to run all matchzoo tests, recommended for big PRs and new versions:
# $ make test
#
# there are three kinds of tests:
#
# 1. "quick" tests
# - run in seconds
# - include all unit tests without marks and all doctests
# - for rapid prototyping
# - CI run this for all PRs
#
# 2. "slow" tests
# - run in minutes
# - include all unit tests marked "slow"
# - CI run this for all PRs
#
# 3. "cron" tests
# - run in minutes
#    - involve nondeterministic behaviors (e.g. network connection)
# - include all unit tests marked "cron"
# - CI run this on a daily basis
#
# to run quick tests, excluding time consuming tests and crons:
# $ make quick
#
# to run slow tests, excluding normal tests and crons:
# $ make slow
#
# to run crons:
# $ make cron
#
# to run all tests:
# $ make test
#
# to run CI push/PR tests:
# $ make push
#
# to run docstring style check:
# $ make flake
init:
pip install -r requirements.txt
TEST_ARGS = -v --full-trace -l --doctest-modules --doctest-continue-on-failure --cov matchzoo/ --cov-report term-missing --cov-report html --cov-config .coveragerc matchzoo/ tests/ -W ignore::DeprecationWarning --ignore=matchzoo/contrib
FLAKE_ARGS = ./matchzoo --exclude=__init__.py,matchzoo/contrib
test:
pytest $(TEST_ARGS)
flake8 $(FLAKE_ARGS)
push:
pytest -m 'not cron' $(TEST_ARGS) ${ARGS}
flake8 $(FLAKE_ARGS)
quick:
pytest -m 'not slow and not cron' $(TEST_ARGS) ${ARGS}
slow:
pytest -m 'slow and not cron' $(TEST_ARGS) ${ARGS}
cron:
pytest -m 'cron' $(TEST_ARGS) ${ARGS}
flake:
flake8 $(FLAKE_ARGS) ${ARGS}
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/Makefile/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/Makefile",
"repo_id": "ContextualSP",
"token_count": 622
} | 252 |
import typing
import numpy as np
import matchzoo as mz
from matchzoo.engine.base_task import BaseTask
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.base_callback import BaseCallback
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.dataloader import DatasetBuilder
from matchzoo.dataloader import DataLoaderBuilder
class Preparer(object):
"""
    Unified setup process for all MatchZoo models.
`config` is used to control specific behaviors. The default `config`
will be updated accordingly if a `config` dictionary is passed. e.g. to
override the default `bin_size`, pass `config={'bin_size': 15}`.
See `tutorials/automation.ipynb` for a detailed walkthrough on usage.
Default `config`:
{
# pair generator builder kwargs
'num_dup': 1,
# histogram unit of DRMM
'bin_size': 30,
'hist_mode': 'LCH',
# dynamic Pooling of MatchPyramid
'compress_ratio_left': 1.0,
'compress_ratio_right': 1.0,
# if no `matchzoo.Embedding` is passed to `tune`
            'embedding_output_dim': 100
}
:param task: Task.
:param config: Configuration of specific behaviors.
Example:
>>> import matchzoo as mz
>>> task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss())
>>> preparer = mz.auto.Preparer(task)
>>> model_class = mz.models.DenseBaseline
>>> train_raw = mz.datasets.toy.load_data('train', 'ranking')
>>> model, prpr, dsb, dlb = preparer.prepare(model_class,
... train_raw)
>>> model.params.completed(exclude=['out_activation_func'])
True
"""
def __init__(
self,
task: BaseTask,
config: typing.Optional[dict] = None
):
"""Init."""
self._task = task
self._config = self.get_default_config()
if config:
self._config.update(config)
self._infer_num_neg()
def prepare(
self,
model_class: typing.Type[BaseModel],
data_pack: mz.DataPack,
callback: typing.Optional[BaseCallback] = None,
preprocessor: typing.Optional[BasePreprocessor] = None,
embedding: typing.Optional['mz.Embedding'] = None,
) -> typing.Tuple[
BaseModel,
BasePreprocessor,
DatasetBuilder,
DataLoaderBuilder,
]:
"""
Prepare.
:param model_class: Model class.
:param data_pack: DataPack used to fit the preprocessor.
:param callback: Callback used to padding a batch.
(default: the default callback of `model_class`)
:param preprocessor: Preprocessor used to fit the `data_pack`.
            (default: the default preprocessor of `model_class`)
        :param embedding: Embedding used to build the embedding matrix; a
            random matrix is used if not provided.
:return: A tuple of `(model, preprocessor, dataset_builder,
dataloader_builder)`.
"""
if not callback:
callback = model_class.get_default_padding_callback()
if not preprocessor:
preprocessor = model_class.get_default_preprocessor()
preprocessor.fit(data_pack, verbose=0)
model, embedding_matrix = self._build_model(
model_class,
preprocessor,
embedding
)
dataset_builder = self._build_dataset_builder(
model,
embedding_matrix,
preprocessor
)
dataloader_builder = self._build_dataloader_builder(
model,
callback
)
return (
model,
preprocessor,
dataset_builder,
dataloader_builder
)
def _build_model(
self,
model_class,
preprocessor,
embedding
) -> typing.Tuple[BaseModel, np.ndarray]:
model = model_class()
model.params['task'] = self._task
if 'with_embedding' in model.params:
embedding_matrix = self._build_matrix(preprocessor, embedding)
model.params['embedding'] = embedding_matrix
else:
embedding_matrix = None
model.build()
return model, embedding_matrix
def _build_matrix(self, preprocessor, embedding):
if embedding is not None:
vocab_unit = preprocessor.context['vocab_unit']
term_index = vocab_unit.state['term_index']
return embedding.build_matrix(term_index)
else:
matrix_shape = (
preprocessor.context['vocab_size'],
self._config['embedding_output_dim']
)
return np.random.uniform(-0.2, 0.2, matrix_shape)
def _build_dataset_builder(self, model, embedding_matrix, preprocessor):
builder_kwargs = dict(
callbacks=[],
batch_size=self._config['batch_size'],
shuffle=self._config['shuffle'],
sort=self._config['sort']
)
if isinstance(self._task.losses[0], (mz.losses.RankHingeLoss,
mz.losses.RankCrossEntropyLoss)):
builder_kwargs.update(dict(
mode='pair',
num_dup=self._config['num_dup'],
num_neg=self._config['num_neg'],
resample=self._config['resample'],
))
if isinstance(model, mz.models.CDSSM):
triletter_callback = mz.dataloader.callbacks.Ngram(
preprocessor, mode='sum')
builder_kwargs['callbacks'].append(triletter_callback)
if isinstance(model, mz.models.DSSM):
triletter_callback = mz.dataloader.callbacks.Ngram(
preprocessor, mode='aggregate')
builder_kwargs['callbacks'].append(triletter_callback)
if isinstance(model, mz.models.DUET):
triletter_callback = mz.dataloader.callbacks.Ngram(
preprocessor, mode='sum')
builder_kwargs['callbacks'].append(triletter_callback)
if isinstance(model, mz.models.DIIN):
letter_callback = mz.dataloader.callbacks.Ngram(
preprocessor, mode='index')
builder_kwargs['callbacks'].append(letter_callback)
if isinstance(model, mz.models.DRMM):
histo_callback = mz.dataloader.callbacks.Histogram(
embedding_matrix=embedding_matrix,
bin_size=self._config['bin_size'],
hist_mode=self._config['hist_mode']
)
builder_kwargs['callbacks'].append(histo_callback)
return DatasetBuilder(**builder_kwargs)
def _build_dataloader_builder(self, model, callback):
builder_kwargs = dict(
stage=self._config['stage'],
callback=callback
)
return DataLoaderBuilder(**builder_kwargs)
def _infer_num_neg(self):
if isinstance(self._task.losses[0], (mz.losses.RankHingeLoss,
mz.losses.RankCrossEntropyLoss)):
self._config['num_neg'] = self._task.losses[0].num_neg
@classmethod
def get_default_config(cls) -> dict:
"""Default config getter."""
return {
# pair dataset builder kwargs
'num_dup': 1,
# dataloader builder kwargs
'batch_size': 8,
'stage': 'train',
'resample': True,
'shuffle': False,
'sort': True,
# histogram unit of DRMM
'bin_size': 30,
'hist_mode': 'LCH',
# dynamic Pooling of MatchPyramid
'compress_ratio_left': 1.0,
'compress_ratio_right': 1.0,
# if no `matchzoo.Embedding` is passed to `tune`
'embedding_output_dim': 100
}
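# Hedged usage sketch: any key returned by `get_default_config` can be
# overridden at construction time (values below are illustrative):
#
#   preparer = Preparer(task, config={'batch_size': 32, 'bin_size': 15})
#   model, prep, dsb, dlb = preparer.prepare(mz.models.DRMM, train_raw)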
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/preparer/preparer.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/preparer/preparer.py",
"repo_id": "ContextualSP",
"token_count": 3733
} | 253 |
import matchzoo as mz
from matchzoo.dataloader import Dataset
class DatasetBuilder(object):
"""
    Dataset Builder. In essence, a wrapped partial function.
Example:
>>> import matchzoo as mz
>>> builder = mz.dataloader.DatasetBuilder(
... mode='point'
... )
>>> data = mz.datasets.toy.load_data()
>>> gen = builder.build(data)
>>> type(gen)
<class 'matchzoo.dataloader.dataset.Dataset'>
"""
def __init__(self, **kwargs):
"""Init."""
self._kwargs = kwargs
def build(self, data_pack, **kwargs) -> Dataset:
"""
Build a Dataset.
:param data_pack: DataPack to build upon.
:param kwargs: Additional keyword arguments to override the keyword
arguments passed in `__init__`.
"""
return mz.dataloader.Dataset(
data_pack, **{**self._kwargs, **kwargs}
)
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/dataset_builder.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/dataset_builder.py",
"repo_id": "ContextualSP",
"token_count": 432
} | 254 |
import typing
from pathlib import Path
import pandas as pd
import matchzoo
from matchzoo.engine.base_task import BaseTask
def load_data(
stage: str = 'train',
task: typing.Union[str, BaseTask] = 'ranking',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, typing.Tuple[matchzoo.DataPack, list]]:
"""
Load toy data.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
Example:
>>> import matchzoo as mz
>>> stages = 'train', 'dev', 'test'
>>> tasks = 'ranking', 'classification'
>>> for stage in stages:
... for task in tasks:
... _ = mz.datasets.toy.load_data(stage, task)
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
path = Path(__file__).parent.joinpath(f'{stage}.csv')
data_pack = matchzoo.pack(pd.read_csv(path, index_col=0), task)
if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def load_embedding():
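    """Load the bundled 2-d toy embedding (GloVe text format)."""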
path = Path(__file__).parent.joinpath('embedding.2d.txt')
return matchzoo.embedding.load_from_file(path, mode='glove')
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/toy/__init__.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/toy/__init__.py",
"repo_id": "ContextualSP",
"token_count": 816
} | 255 |
"""Parameter class."""
import inspect
import numbers
import typing
import hyperopt.pyll
from matchzoo.engine import hyper_spaces
# Both hyperopt native spaces and matchzoo proxies are valid spaces.
SpaceType = typing.Union[hyperopt.pyll.Apply, hyper_spaces.HyperoptProxy]
class Param(object):
"""
Parameter class.
Basic usages with a name and value:
>>> param = Param('my_param', 10)
>>> param.name
'my_param'
>>> param.value
10
Use with a validator to make sure the parameter always keeps a valid
value.
>>> param = Param(
... name='my_param',
... value=5,
... validator=lambda x: 0 < x < 20
... )
>>> param.validator # doctest: +ELLIPSIS
<function <lambda> at 0x...>
>>> param.value
5
>>> param.value = 10
>>> param.value
10
>>> param.value = -1
Traceback (most recent call last):
...
        ValueError: Validator not satisfied.
The validator's definition is as follows:
validator=lambda x: 0 < x < 20
Use with a hyper space. Setting up a hyper space for a parameter makes the
parameter tunable in a :class:`matchzoo.engine.Tuner`.
>>> from matchzoo.engine.hyper_spaces import quniform
>>> param = Param(
... name='positive_num',
... value=1,
... hyper_space=quniform(low=1, high=5)
... )
>>> param.hyper_space # doctest: +ELLIPSIS
<matchzoo.engine.hyper_spaces.quniform object at ...>
>>> from hyperopt.pyll.stochastic import sample
>>> hyperopt_space = param.hyper_space.convert(param.name)
>>> samples = [sample(hyperopt_space) for _ in range(64)]
>>> set(samples) == {1, 2, 3, 4, 5}
True
The boolean value of a :class:`Param` instance is only `True`
when the value is not `None`. This is because some default falsy values
like zero or an empty list are valid parameter values. In other words,
the boolean value means to be "if the parameter value is filled".
>>> param = Param('dropout')
>>> if param:
... print('OK')
>>> param = Param('dropout', 0)
>>> if param:
... print('OK')
OK
A `_pre_assignment_hook` is initialized as a data type convertor if the
value is set as a number to keep data type consistency of the parameter.
This conversion supports python built-in numbers, `numpy` numbers, and
any number that inherits :class:`numbers.Number`.
>>> param = Param('float_param', 0.5)
>>> param.value = 10
>>> param.value
10.0
>>> type(param.value)
<class 'float'>
"""
def __init__(
self,
name: str,
value: typing.Any = None,
hyper_space: typing.Optional[SpaceType] = None,
validator: typing.Optional[
typing.Callable[[typing.Any], bool]] = None,
desc: typing.Optional[str] = None,
):
"""
Parameter constructor.
:param name: Name of the parameter.
:param value: Value of the parameter, `None` by default, which means
"this parameter is not filled yet."
:param hyper_space: Hyper space of the parameter, `None` by default.
If set, then a :class:`matchzoo.engine.ParamTable` that has this
parameter will include this `hyper_space` as a part of the
parameter table's search space.
:param validator: Validator of the parameter, `None` by default. If
validation is needed, pass a callable that, given a value, returns
a `bool`. The definition of the validator is retrieved when the
validation fails, so either use a function or a `lambda` that
occupies its own line for better readability.
"""
self._name = name
self._desc = desc
self._value = None
self._hyper_space = None
self._validator = None
self._pre_assignment_hook = None
self.validator = validator
self.hyper_space = hyper_space
if value is not None: # bypass checking if no default
self.value = value
@property
def name(self) -> str:
""":return: Name of the parameter."""
return self._name
@property
def value(self) -> typing.Any:
""":return: Value of the parameter."""
return self._value
@value.setter
def value(self, new_value: typing.Any):
"""
Set the value of parameter to `new_value`.
Notice that this setter validates `new_value` before assignment. As
        a result, if the validation fails, the value of the parameter is not
changed.
:param new_value: New value of the parameter to set.
"""
if self._pre_assignment_hook:
new_value = self._pre_assignment_hook(new_value)
self._validate(new_value)
self._value = new_value
if not self._pre_assignment_hook:
self._infer_pre_assignment_hook()
@property
def hyper_space(self) -> SpaceType:
""":return: Hyper space of the parameter."""
return self._hyper_space
@hyper_space.setter
def hyper_space(self, new_space: SpaceType):
""":param new_space: New space of the parameter to set."""
self._hyper_space = new_space
@property
def validator(self) -> typing.Callable[[typing.Any], bool]:
""":return: Validator of the parameter."""
return self._validator
@validator.setter
def validator(self, new_validator: typing.Callable[[typing.Any], bool]):
""":param new_validator: New space of the parameter to set."""
if new_validator and not callable(new_validator):
raise TypeError("Validator must be a callable or None.")
self._validator = new_validator
@property
def desc(self) -> str:
""":return: Parameter description."""
return self._desc
@desc.setter
def desc(self, value: str):
""":param value: New description of the parameter."""
self._desc = value
def _infer_pre_assignment_hook(self):
if isinstance(self._value, numbers.Number):
self._pre_assignment_hook = lambda x: type(self._value)(x)
def _validate(self, value):
if self._validator:
valid = self._validator(value)
if not valid:
error_msg = "Validator not satifised.\n"
error_msg += "The validator's definition is as follows:\n"
error_msg += inspect.getsource(self._validator).strip()
raise ValueError(error_msg)
def __bool__(self):
""":return: `False` when the value is `None`, `True` otherwise."""
return self._value is not None
def set_default(self, val, verbose=1):
"""
Set default value, has no effect if already has a value.
:param val: Default value to set.
:param verbose: Verbosity.
"""
if self._value is None:
self.value = val
if verbose:
print(f"Parameter \"{self._name}\" set to {val}.")
def reset(self):
"""
Set the parameter's value to `None`, which means "not set".
This method bypasses validator.
Example:
>>> import matchzoo as mz
>>> param = mz.Param(
... name='str', validator=lambda x: isinstance(x, str))
>>> param.value = 'hello'
>>> param.value = None
Traceback (most recent call last):
...
            ValueError: Validator not satisfied.
The validator's definition is as follows:
name='str', validator=lambda x: isinstance(x, str))
>>> param.reset()
>>> param.value is None
True
"""
self._value = None
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/param.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/param.py",
"repo_id": "ContextualSP",
"token_count": 3371
} | 256 |
"""An implementation of aNMM Model."""
import typing
import torch
import torch.nn as nn
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.modules import Attention, Matching
from matchzoo.utils import parse_activation
class aNMM(BaseModel):
"""
aNMM: Ranking Short Answer Texts with Attention-Based Neural Matching Model.
Examples:
>>> model = aNMM()
>>> model.params['embedding_output_dim'] = 300
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(name='mask_value', value=0,
desc="The value to be masked from inputs."))
params.add(Param(name='num_bins', value=200,
desc="Integer, number of bins."))
params.add(Param(name='hidden_sizes', value=[100],
desc="Number of hidden size for each hidden layer"))
params.add(Param(name='activation', value='relu',
desc="The activation function."))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
def build(self):
"""
Build model structure.
aNMM: Ranking Short Answer Texts with Attention-Based Neural Matching Model.
"""
self.embedding = self._make_default_embedding_layer()
# QA Matching
self.matching = Matching(matching_type='dot', normalize=True)
# Value-shared Weighting
activation = parse_activation(self._params['activation'])
in_hidden_size = [
self._params['num_bins'],
*self._params['hidden_sizes']
]
out_hidden_size = [
*self._params['hidden_sizes'],
1
]
hidden_layers = [
nn.Sequential(
nn.Linear(in_size, out_size),
activation
)
for in_size, out_size, in zip(
in_hidden_size,
out_hidden_size
)
]
self.hidden_layers = nn.Sequential(*hidden_layers)
# Query Attention
self.q_attention = Attention(self._params['embedding_output_dim'])
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
# Build output
self.out = self._make_output_layer(1)
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# BI = number of bins
# Left input and right input
# shape = [B, L]
# shape = [B, R]
input_left, input_right = inputs['text_left'], inputs['text_right']
# Process left and right input
# shape = [B, L, D]
# shape = [B, R, D]
embed_left = self.embedding(input_left.long())
embed_right = self.embedding(input_right.long())
# Left and right input mask matrix
# shape = [B, L]
# shape = [B, R]
left_mask = (input_left == self._params['mask_value'])
right_mask = (input_right == self._params['mask_value'])
# Compute QA Matching matrix
# shape = [B, L, R]
qa_matching_matrix = self.matching(embed_left, embed_right)
qa_matching_matrix.masked_fill_(right_mask.unsqueeze(1), float(0))
# Bin QA Matching Matrix
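        # Each cosine score s in [-1, 1] maps to bin floor((s + 1) / 2 * (BI - 1));
        # e.g. with BI = 200: s = -1 -> bin 0, s = 0 -> bin 99, s = 1 -> bin 199.
        # index_add_ below then accumulates the raw scores per (B, L, bin) cell.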
B, L = qa_matching_matrix.shape[0], qa_matching_matrix.shape[1]
BI = self._params['num_bins']
device = qa_matching_matrix.device
qa_matching_matrix = qa_matching_matrix.view(-1)
qa_matching_detach = qa_matching_matrix.detach()
bin_indexes = torch.floor((qa_matching_detach + 1.) / 2 * (BI - 1.)).long()
bin_indexes = bin_indexes.view(B * L, -1)
index_offset = torch.arange(start=0, end=(B * L * BI), step=BI,
device=device).long().unsqueeze(-1)
bin_indexes += index_offset
bin_indexes = bin_indexes.view(-1)
# shape = [B, L, BI]
bin_qa_matching = torch.zeros(B * L * BI, device=device)
bin_qa_matching.index_add_(0, bin_indexes, qa_matching_matrix)
bin_qa_matching = bin_qa_matching.view(B, L, -1)
# Apply dropout
bin_qa_matching = self.dropout(bin_qa_matching)
# MLP hidden layers
# shape = [B, L, 1]
hiddens = self.hidden_layers(bin_qa_matching)
# Query attention
# shape = [B, L, 1]
q_attention = self.q_attention(embed_left, left_mask).unsqueeze(-1)
# shape = [B, 1]
score = torch.sum(hiddens * q_attention, dim=1)
# shape = [B, *]
out = self.out(score)
return out
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/anmm.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/anmm.py",
"repo_id": "ContextualSP",
"token_count": 2474
} | 257 |
"""An implementation of KNRM Model."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.modules import GaussianKernel
class KNRM(BaseModel):
"""
KNRM Model.
Examples:
>>> model = KNRM()
>>> model.params['kernel_num'] = 11
>>> model.params['sigma'] = 0.1
>>> model.params['exact_sigma'] = 0.001
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(
name='kernel_num',
value=11,
hyper_space=hyper_spaces.quniform(low=5, high=20),
desc="The number of RBF kernels."
))
params.add(Param(
name='sigma',
value=0.1,
hyper_space=hyper_spaces.quniform(
low=0.01, high=0.2, q=0.01),
desc="The `sigma` defines the kernel width."
))
params.add(Param(
name='exact_sigma', value=0.001,
desc="The `exact_sigma` denotes the `sigma` "
"for exact match."
))
return params
def build(self):
"""Build model structure."""
self.embedding = self._make_default_embedding_layer()
self.kernels = nn.ModuleList()
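        # Kernel centers are evenly spaced: mu_i = (2 * i + 1) / (K - 1) - 1.
        # With kernel_num = 11 this gives -0.9, -0.7, ..., 0.9; the last
        # center overflows to 1.1 and is clamped below to mu = 1.0 with the
        # narrow `exact_sigma`, acting as an exact-match kernel.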
for i in range(self._params['kernel_num']):
mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
self._params['kernel_num'] - 1) - 1.0
sigma = self._params['sigma']
if mu > 1.0:
sigma = self._params['exact_sigma']
mu = 1.0
self.kernels.append(GaussianKernel(mu=mu, sigma=sigma))
self.out = self._make_output_layer(self._params['kernel_num'])
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# K = number of kernels
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
query, doc = inputs['text_left'], inputs['text_right']
# Process left input.
# shape = [B, L, D]
embed_query = self.embedding(query.long())
# shape = [B, R, D]
embed_doc = self.embedding(doc.long())
# shape = [B, L, R]
matching_matrix = torch.einsum(
'bld,brd->blr',
F.normalize(embed_query, p=2, dim=-1),
F.normalize(embed_doc, p=2, dim=-1)
)
KM = []
for kernel in self.kernels:
# shape = [B]
K = torch.log1p(kernel(matching_matrix).sum(dim=-1)).sum(dim=-1)
KM.append(K)
# shape = [B, K]
phi = torch.stack(KM, dim=1)
out = self.out(phi)
return out
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/knrm.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/knrm.py",
"repo_id": "ContextualSP",
"token_count": 1573
} | 258 |
"""Semantic composite module for DIIN model."""
import typing
import torch
import torch.nn as nn
class SemanticComposite(nn.Module):
"""
SemanticComposite module.
Apply a self-attention layer and a semantic composite fuse gate to compute the
encoding result of one tensor.
:param in_features: Feature size of input.
:param dropout_rate: The dropout rate.
Examples:
>>> import torch
>>> module = SemanticComposite(in_features=10)
>>> x = torch.randn(4, 5, 10)
>>> x.shape
torch.Size([4, 5, 10])
>>> module(x).shape
torch.Size([4, 5, 10])
"""
def __init__(self, in_features, dropout_rate: float = 0.0):
"""Init."""
super().__init__()
self.att_linear = nn.Linear(3 * in_features, 1, False)
self.z_gate = nn.Linear(2 * in_features, in_features, True)
self.r_gate = nn.Linear(2 * in_features, in_features, True)
self.f_gate = nn.Linear(2 * in_features, in_features, True)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, x):
"""Forward."""
seq_length = x.shape[1]
x_1 = x.unsqueeze(dim=2).repeat(1, 1, seq_length, 1)
x_2 = x.unsqueeze(dim=1).repeat(1, seq_length, 1, 1)
x_concat = torch.cat([x_1, x_2, x_1 * x_2], dim=-1)
# Self-attention layer.
x_concat = self.dropout(x_concat)
attn_matrix = self.att_linear(x_concat).squeeze(dim=-1)
attn_weight = torch.softmax(attn_matrix, dim=2)
attn = torch.bmm(attn_weight, x)
# Semantic composite fuse gate.
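        # z is a tanh candidate update; r and f are sigmoid gates mixing the
        # original input with the update: encoding = r * x + f * z.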
        x_attn_concat = self.dropout(torch.cat([x, attn], dim=-1))
z = torch.tanh(self.z_gate(x_attn_concat))
r = torch.sigmoid(self.r_gate(x_attn_concat))
f = torch.sigmoid(self.f_gate(x_attn_concat))
encoding = r * x + f * z
return encoding
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/semantic_composite.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/semantic_composite.py",
"repo_id": "ContextualSP",
"token_count": 916
} | 259 |
import numpy as np
from .unit import Unit
class MatchingHistogram(Unit):
"""
MatchingHistogramUnit Class.
:param bin_size: The number of bins of the matching histogram.
:param embedding_matrix: The word embedding matrix applied to calculate
the matching histogram.
:param normalize: Boolean, normalize the embedding or not.
    :param mode: The type of the histogram; it should be one of 'CH', 'NH',
        or 'LCH'.
Examples:
>>> embedding_matrix = np.array([[1.0, -1.0], [1.0, 2.0], [1.0, 3.0]])
>>> text_left = [0, 1]
>>> text_right = [1, 2]
>>> histogram = MatchingHistogram(3, embedding_matrix, True, 'CH')
>>> histogram.transform([text_left, text_right])
[[3.0, 1.0, 1.0], [1.0, 2.0, 2.0]]
"""
def __init__(self, bin_size: int = 30, embedding_matrix=None,
normalize=True, mode: str = 'LCH'):
"""The constructor."""
self._hist_bin_size = bin_size
self._embedding_matrix = embedding_matrix
if normalize:
self._normalize_embedding()
self._mode = mode
def _normalize_embedding(self):
"""Normalize the embedding matrix."""
l2_norm = np.sqrt(
(self._embedding_matrix * self._embedding_matrix).sum(axis=1)
)
self._embedding_matrix = \
self._embedding_matrix / l2_norm[:, np.newaxis]
def transform(self, input_: list) -> list:
"""Transform the input text."""
text_left, text_right = input_
matching_hist = np.ones((len(text_left), self._hist_bin_size),
dtype=np.float32)
embed_left = self._embedding_matrix[text_left]
embed_right = self._embedding_matrix[text_right]
matching_matrix = embed_left.dot(np.transpose(embed_right))
for (i, j), value in np.ndenumerate(matching_matrix):
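            # Map a cosine-like similarity in [-1, 1] linearly onto the
            # bin indices [0, bin_size - 1] before counting.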
bin_index = int((value + 1.) / 2. * (self._hist_bin_size - 1.))
matching_hist[i][bin_index] += 1.0
if self._mode == 'NH':
matching_sum = matching_hist.sum(axis=1)
matching_hist = matching_hist / matching_sum[:, np.newaxis]
elif self._mode == 'LCH':
matching_hist = np.log(matching_hist)
return matching_hist.tolist()
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/matching_histogram.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/matching_histogram.py",
"repo_id": "ContextualSP",
"token_count": 1080
} | 260 |
"""Base Trainer."""
import typing
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import matchzoo
from matchzoo import tasks
from matchzoo.dataloader import DataLoader
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.base_metric import BaseMetric
from matchzoo.utils import AverageMeter, Timer, EarlyStopping
class Trainer:
"""
    MatchZoo trainer.
:param model: A :class:`BaseModel` instance.
:param optimizer: A :class:`optim.Optimizer` instance.
:param trainloader: A :class`DataLoader` instance. The dataloader
is used for training the model.
:param validloader: A :class`DataLoader` instance. The dataloader
is used for validating the model.
:param device: The desired device of returned tensor. Default:
if None, use the current device. If `torch.device` or int,
use device specified by user. If list, use data parallel.
:param start_epoch: Int. Number of starting epoch.
:param epochs: The maximum number of epochs for training.
Defaults to 10.
:param validate_interval: Int. Interval of validation.
:param scheduler: LR scheduler used to adjust the learning rate
based on the number of epochs.
:param clip_norm: Max norm of the gradients to be clipped.
    :param patience: Number of events to wait without improvement
        before stopping the training.
:param key: Key of metric to be compared.
:param checkpoint: A checkpoint from which to continue training.
If None, training starts from scratch. Defaults to None.
Should be a file-like object (has to implement read, readline,
tell, and seek), or a string containing a file name.
:param save_dir: Directory to save trainer.
:param save_all: Bool. If True, save `Trainer` instance; If False,
only save model. Defaults to False.
:param verbose: 0, 1, or 2. Verbosity mode. 0 = silent,
1 = verbose, 2 = one log line per epoch.
"""
def __init__(
self,
model: BaseModel,
optimizer: optim.Optimizer,
trainloader: DataLoader,
validloader: DataLoader,
device: typing.Union[torch.device, int, list, None] = None,
start_epoch: int = 1,
epochs: int = 10,
validate_interval: typing.Optional[int] = None,
scheduler: typing.Any = None,
clip_norm: typing.Union[float, int] = None,
patience: typing.Optional[int] = None,
key: typing.Any = None,
checkpoint: typing.Union[str, Path] = None,
save_dir: typing.Union[str, Path] = None,
save_all: bool = False,
verbose: int = 1,
**kwargs
):
"""Base Trainer constructor."""
self._load_model(model, device)
self._load_dataloader(
trainloader, validloader, validate_interval
)
self._optimizer = optimizer
self._scheduler = scheduler
self._clip_norm = clip_norm
self._criterions = self._task.losses
if not key:
key = self._task.metrics[0]
self._early_stopping = EarlyStopping(
patience=patience,
key=key
)
self._start_epoch = start_epoch
self._epochs = epochs
self._iteration = 0
self._verbose = verbose
self._save_all = save_all
print("validate_interval:", self._validate_interval, flush=True)
self._load_path(checkpoint, save_dir)
def _load_dataloader(
self,
trainloader: DataLoader,
validloader: DataLoader,
validate_interval: typing.Optional[int] = None
):
"""
Load trainloader and determine validate interval.
:param trainloader: A :class`DataLoader` instance. The dataloader
is used to train the model.
:param validloader: A :class`DataLoader` instance. The dataloader
is used to validate the model.
:param validate_interval: int. Interval of validation.
"""
if not isinstance(trainloader, DataLoader):
raise ValueError(
'trainloader should be a `DataLoader` instance.'
)
if not isinstance(validloader, DataLoader):
raise ValueError(
'validloader should be a `DataLoader` instance.'
)
self._trainloader = trainloader
self._validloader = validloader
if not validate_interval:
self._validate_interval = len(self._trainloader)
else:
self._validate_interval = validate_interval
def _load_model(
self,
model: BaseModel,
device: typing.Union[torch.device, int, list, None] = None
):
"""
Load model.
:param model: :class:`BaseModel` instance.
:param device: The desired device of returned tensor. Default:
if None, use the current device. If `torch.device` or int,
use device specified by user. If list, use data parallel.
"""
if not isinstance(model, BaseModel):
raise ValueError(
'model should be a `BaseModel` instance.'
f' But got {type(model)}.'
)
self._task = model.params['task']
self._data_parallel = False
self._model = model
if isinstance(device, list) and len(device):
self._data_parallel = True
self._model = torch.nn.DataParallel(self._model, device_ids=device)
self._device = device[0]
else:
if not (isinstance(device, torch.device) or isinstance(device, int)):
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
self._device = device
self._model.to(self._device)
def _load_path(
self,
checkpoint: typing.Union[str, Path],
save_dir: typing.Union[str, Path],
):
"""
        Load save_dir and restore from checkpoint.
:param checkpoint: A checkpoint from which to continue training.
If None, training starts from scratch. Defaults to None.
Should be a file-like object (has to implement read, readline,
tell, and seek), or a string containing a file name.
:param save_dir: Directory to save trainer.
"""
if not save_dir:
save_dir = Path('.').joinpath('save')
if not Path(save_dir).exists():
Path(save_dir).mkdir(parents=True)
self._save_dir = Path(save_dir)
# Restore from checkpoint
if checkpoint:
if self._save_all:
self.restore(checkpoint)
else:
self.restore_model(checkpoint)
def _backward(self, loss):
"""
Computes the gradient of current `loss` graph leaves.
:param loss: Tensor. Loss of model.
"""
self._optimizer.zero_grad()
loss.backward()
if self._clip_norm:
nn.utils.clip_grad_norm_(
self._model.parameters(), self._clip_norm
)
self._optimizer.step()
def _run_scheduler(self):
"""Run scheduler."""
if self._scheduler:
self._scheduler.step()
def run(self):
"""
Train model.
        The process:
Run each epoch -> Run scheduler -> Should stop early?
"""
self._model.train()
timer = Timer()
for epoch in range(self._start_epoch, self._epochs + 1):
self._epoch = epoch
self._run_epoch()
self._run_scheduler()
if self._early_stopping.should_stop_early:
break
if self._verbose:
tqdm.write(f'Cost time: {timer.time}s')
def _run_epoch(self):
"""
Run each epoch.
The training steps:
- Get batch and feed them into model
        - Get outputs. Calculate all losses and sum them up
- Loss backwards and optimizer steps
- Evaluation
- Update and output result
"""
        # Get total number of batches
num_batch = len(self._trainloader)
train_loss = AverageMeter()
with tqdm(enumerate(self._trainloader), total=num_batch,
disable=not self._verbose) as pbar:
for step, (inputs, target) in pbar:
outputs = self._model(inputs)
                # Calculate all losses and sum them up
                loss = sum(c(outputs, target) for c in self._criterions)
self._backward(loss)
train_loss.update(loss.item())
# Set progress bar
pbar.set_description(f'Epoch {self._epoch}/{self._epochs}')
pbar.set_postfix(loss=f'{loss.item():.3f}')
# Run validate
self._iteration += 1
# print(f"current _iteration:{self._iteration}", flush=True)
if self._iteration % self._validate_interval == 0:
pbar.update(1)
if self._verbose:
pbar.write(
f'[Iter-{self._iteration} '
f'Loss-{train_loss.avg:.3f}]:')
print(f'[Iter-{self._iteration} '
f'Loss-{train_loss.avg:.3f}]:', flush=True)
result = self.evaluate(self._validloader)
if self._verbose:
pbar.write(' Validation: ' + ' - '.join(
f'{k}: {round(v, 4)}' for k, v in result.items()))
print(' Validation: ' + ' - '.join(
f'{k}: {round(v, 4)}' for k, v in result.items()), flush=True)
# Early stopping
self._early_stopping.update(result)
if self._early_stopping.should_stop_early:
self._save(self._epoch)
pbar.write('Ran out of patience. Stop training...')
break
elif self._early_stopping.is_best_so_far:
self._save(self._epoch)
def evaluate(
self,
dataloader: DataLoader,
):
"""
Evaluate the model.
:param dataloader: A DataLoader object to iterate over the data.
"""
result = dict()
y_pred = self.predict(dataloader)
y_true = dataloader.label
id_left = dataloader.id_left
if isinstance(self._task, tasks.Classification):
for metric in self._task.metrics:
result[metric] = metric(y_true, y_pred)
else:
for metric in self._task.metrics:
result[metric] = self._eval_metric_on_data_frame(
metric, id_left, y_true, y_pred.squeeze(axis=-1))
return result
@classmethod
def _eval_metric_on_data_frame(
cls,
metric: BaseMetric,
id_left: typing.Any,
            y_true: typing.Union[list, np.ndarray],
            y_pred: typing.Union[list, np.ndarray]
):
"""
Eval metric on data frame.
This function is used to eval metrics for `Ranking` task.
:param metric: Metric for `Ranking` task.
:param id_left: id of input left. Samples with same id_left should
be grouped for evaluation.
:param y_true: Labels of dataset.
:param y_pred: Outputs of model.
:return: Evaluation result.
"""
eval_df = pd.DataFrame(data={
'id': id_left,
'true': y_true,
'pred': y_pred
})
assert isinstance(metric, BaseMetric)
val = eval_df.groupby(by='id').apply(
lambda df: metric(df['true'].values, df['pred'].values)
).mean()
return val
def predict(
self,
dataloader: DataLoader
    ) -> np.ndarray:
"""
Generate output predictions for the input samples.
:param dataloader: input DataLoader
:return: predictions
"""
with torch.no_grad():
self._model.eval()
predictions = []
for batch in dataloader:
inputs = batch[0]
outputs = self._model(inputs).detach().cpu()
predictions.append(outputs)
self._model.train()
return torch.cat(predictions, dim=0).numpy()
def _save(self, epoch):
"""Save."""
if self._save_all:
self.save(epoch)
else:
self.save_model(epoch)
def save_model(self, epoch):
"""Save the model."""
checkpoint = self._save_dir.joinpath(f'model-{epoch}.pt')
print(f"Epoch{epoch}: save the model at {checkpoint}", flush=True)
if self._data_parallel:
torch.save(self._model.module.state_dict(), checkpoint)
else:
torch.save(self._model.state_dict(), checkpoint)
def save(self, epoch):
"""
Save the trainer.
        `Trainer` parameters like epoch, best_so_far, model, optimizer
        and early_stopping will be saved to the specified file path.
:param path: Path to save trainer.
"""
checkpoint = self._save_dir.joinpath(f'trainer-{epoch}.pt')
print(f"Epoch{epoch}: save the trainer at {checkpoint}", flush=True)
if self._data_parallel:
model = self._model.module.state_dict()
else:
model = self._model.state_dict()
state = {
'epoch': self._epoch,
'model': model,
'optimizer': self._optimizer.state_dict(),
'early_stopping': self._early_stopping.state_dict(),
}
if self._scheduler:
state['scheduler'] = self._scheduler.state_dict()
torch.save(state, checkpoint)
def restore_model(self, checkpoint: typing.Union[str, Path]):
"""
Restore model.
:param checkpoint: A checkpoint from which to continue training.
"""
state = torch.load(checkpoint, map_location=self._device)
if self._data_parallel:
self._model.module.load_state_dict(state)
else:
self._model.load_state_dict(state)
def restore(self, checkpoint: typing.Union[str, Path] = None):
"""
Restore trainer.
:param checkpoint: A checkpoint from which to continue training.
"""
state = torch.load(checkpoint, map_location=self._device)
if self._data_parallel:
self._model.module.load_state_dict(state['model'])
else:
self._model.load_state_dict(state['model'])
self._optimizer.load_state_dict(state['optimizer'])
self._start_epoch = state['epoch'] + 1
self._early_stopping.load_state_dict(state['early_stopping'])
if self._scheduler:
self._scheduler.load_state_dict(state['scheduler'])
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/trainers/trainer.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/trainers/trainer.py",
"repo_id": "ContextualSP",
"token_count": 7080
} | 261 |
import pytest
import matchzoo as mz
from matchzoo import preprocessors
from matchzoo.dataloader import callbacks
from matchzoo.dataloader import Dataset, DataLoader
from matchzoo.datasets import embeddings
from matchzoo.embedding import load_from_file
@pytest.fixture(scope='module')
def train_raw():
return mz.datasets.toy.load_data('test', task='ranking')[:5]
def test_basic_padding(train_raw):
preprocessor = preprocessors.BasicPreprocessor()
data_preprocessed = preprocessor.fit_transform(train_raw, verbose=0)
dataset = Dataset(data_preprocessed, batch_size=5, mode='point')
pre_fixed_padding = callbacks.BasicPadding(
fixed_length_left=5, fixed_length_right=5, pad_word_mode='pre', with_ngram=False)
dataloader = DataLoader(dataset, callback=pre_fixed_padding)
for batch in dataloader:
assert batch[0]['text_left'].shape == (5, 5)
assert batch[0]['text_right'].shape == (5, 5)
post_padding = callbacks.BasicPadding(pad_word_mode='post', with_ngram=False)
dataloader = DataLoader(dataset, callback=post_padding)
for batch in dataloader:
max_left_len = max(batch[0]['length_left'].detach().cpu().numpy())
max_right_len = max(batch[0]['length_right'].detach().cpu().numpy())
assert batch[0]['text_left'].shape == (5, max_left_len)
assert batch[0]['text_right'].shape == (5, max_right_len)
def test_drmm_padding(train_raw):
preprocessor = preprocessors.BasicPreprocessor()
data_preprocessed = preprocessor.fit_transform(train_raw, verbose=0)
embedding_matrix = load_from_file(embeddings.EMBED_10_GLOVE, mode='glove')
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = embedding_matrix.build_matrix(term_index)
    histogram_callback = callbacks.Histogram(
        embedding_matrix=embedding_matrix, bin_size=30, hist_mode='LCH')
    dataset = Dataset(
        data_preprocessed, mode='point', batch_size=5, callbacks=[histogram_callback])
pre_fixed_padding = callbacks.DRMMPadding(
fixed_length_left=5, fixed_length_right=5, pad_mode='pre')
dataloader = DataLoader(dataset, callback=pre_fixed_padding)
for batch in dataloader:
assert batch[0]['text_left'].shape == (5, 5)
assert batch[0]['text_right'].shape == (5, 5)
assert batch[0]['match_histogram'].shape == (5, 5, 30)
post_padding = callbacks.DRMMPadding(pad_mode='post')
dataloader = DataLoader(dataset, callback=post_padding)
for batch in dataloader:
max_left_len = max(batch[0]['length_left'].detach().cpu().numpy())
max_right_len = max(batch[0]['length_right'].detach().cpu().numpy())
assert batch[0]['text_left'].shape == (5, max_left_len)
assert batch[0]['text_right'].shape == (5, max_right_len)
assert batch[0]['match_histogram'].shape == (5, max_left_len, 30)
def test_bert_padding(train_raw):
preprocessor = preprocessors.BertPreprocessor()
data_preprocessed = preprocessor.transform(train_raw, verbose=0)
dataset = Dataset(data_preprocessed, mode='point', batch_size=5)
pre_fixed_padding = callbacks.BertPadding(
fixed_length_left=5, fixed_length_right=5, pad_mode='pre')
dataloader = DataLoader(dataset, callback=pre_fixed_padding)
for batch in dataloader:
assert batch[0]['text_left'].shape == (5, 7)
assert batch[0]['text_right'].shape == (5, 6)
post_padding = callbacks.BertPadding(pad_mode='post')
dataloader = DataLoader(dataset, callback=post_padding)
for batch in dataloader:
max_left_len = max(batch[0]['length_left'].detach().cpu().numpy())
max_right_len = max(batch[0]['length_right'].detach().cpu().numpy())
assert batch[0]['text_left'].shape == (5, max_left_len + 2)
assert batch[0]['text_right'].shape == (5, max_right_len + 1)
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/dataloader/test_callbacks.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/dataloader/test_callbacks.py",
"repo_id": "ContextualSP",
"token_count": 1558
} | 262 |
import torch
import numpy as np
import pandas as pd
import matchzoo as mz
import os
print('matchzoo version', mz.__version__)
model_name = "esim-mcd1"
model_path = f"../../model/traversal_path_{model_name}/"
data_path = f"../../data/"
if not os.path.exists(model_path):
    os.makedirs(model_path)
task = mz.tasks.Classification(num_classes=2)
task.metrics = ['acc']
print("`classification_task` initialized with metrics", task.metrics)
# task = mz.tasks.Classification()
train_raw = mz.datasets.cfq.load_data(stage='train', task=task, data_root=data_path, suffix="mask_classification.csv")
dev_raw = mz.datasets.cfq.load_data(stage='dev', task=task, data_root=data_path, suffix = "mask_classification.csv")
print('data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`')
preprocessor = mz.models.ESIM.get_default_preprocessor()
train_processed = preprocessor.fit_transform(train_raw)
if os.path.exists(f"{model_path}/preprocessor.dill"):
os.remove(f"{model_path}/preprocessor.dill")
preprocessor.save(model_path)
dev_processed = preprocessor.transform(dev_raw)
print(train_processed.frame())
print(dev_processed.frame())
trainset = mz.dataloader.Dataset(
data_pack=train_processed,
mode='point',
batch_size = 256,
shuffle = True
)
devset = mz.dataloader.Dataset(
data_pack=dev_processed,
mode='point',
batch_size=256,
shuffle = False
)
padding_callback = mz.models.ESIM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
stage='train',
callback=padding_callback
)
devloader = mz.dataloader.DataLoader(
dataset=devset,
stage='dev',
callback=padding_callback
)
model = mz.models.ESIM()
model.params['task'] = task
model.params['embedding_input_dim'] = preprocessor.context['embedding_input_dim']
model.guess_and_fill_missing_params()
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adam(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=devloader,
validate_interval=None,
epochs=50,
save_all = False,
save_dir=model_path,
device=[0,1,2, 3,4,5,7]
)
trainer.run()
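# After training, the best checkpoint is written by Trainer.save_model as
# f"{model_path}/model-{epoch}.pt" and can be reloaded later with
# trainer.restore_model(checkpoint_path).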
| ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/train_esim.py/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/train_esim.py",
"repo_id": "ContextualSP",
"token_count": 884
} | 263 |
<jupyter_start><jupyter_code>%run init.ipynb
preprocessor = mz.preprocessors.BasicPreprocessor(
truncated_length_left = 10,
truncated_length_right = 40,
filter_low_freq = 2
)
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=5,
num_neg=1
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
padding_callback = mz.models.KNRM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
callback=padding_callback
)
model = mz.models.KNRM()
model.params['task'] = ranking_task
model.params['embedding'] = embedding_matrix
model.params['kernel_num'] = 21
model.params['sigma'] = 0.1
model.params['exact_sigma'] = 0.001
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output> | ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/knrm.ipynb/0 | {
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/knrm.ipynb",
"repo_id": "ContextualSP",
"token_count": 734
} | 264 |
#!/usr/bin/env bash
export seed=1
export config_file=train_configs_bert/concat.none.jsonnet
export model_file=checkpoints_cosql/cosql_bert_concat_none_model
export tables_file=dataset_cosql/tables.json
export database_path=dataset_cosql/database
export dataset_path=dataset_cosql
export train_data_path=dataset_cosql/train.json
export validation_data_path=dataset_cosql/dev.json
allennlp train -s ${model_file} ${config_file} \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
-o "{\"model.serialization_dir\":\"${model_file}\",\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\",\"dataset_reader.tables_file\":\"${tables_file}\",\"dataset_reader.database_path\":\"${database_path}\",\"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\",\"model.dataset_path\":\"${dataset_path}\"}" | ContextualSP/semantic_parsing_in_context/bash_files/linux/train_cosql_bert.bash/0 | {
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/linux/train_cosql_bert.bash",
"repo_id": "ContextualSP",
"token_count": 324
} | 265 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Utility functions for reading the standardised text2sql datasets presented in
`"Improving Text to SQL Evaluation Methodology" <https://arxiv.org/abs/1806.09029>`_
"""
import json
import os
import sqlite3
from collections import defaultdict
from typing import List, Dict, Optional
class TableColumn:
"""
Representing the column of table
"""
def __init__(self,
name: str,
text: str,
column_type: str,
is_primary_key: bool,
refer_table,
foreign_key: Optional[List[str]]):
self.name = name
self.text = text
self.column_type = column_type
self.is_primary_key = is_primary_key
self.foreign_key = foreign_key
self.refer_table = refer_table
def __str__(self):
return f'{self.name}'
class Table:
"""
Representing the table
"""
def __init__(self,
name: str,
text: str,
columns: List[TableColumn]):
self.name = name
self.text = text
self.columns = columns
def read_dataset_schema(schema_path: str):
"""
Reading all table from `schema_path`.
:param schema_path: default from `tables.json` of sparc data folder.
:return:
"""
schemas: Dict[str, Dict[str, Table]] = defaultdict(dict)
schema_id_to_table: Dict[str, Dict[int, Table]] = defaultdict(dict)
schema_id_to_col: Dict[str, Dict[int, TableColumn]] = defaultdict(dict)
dbs_json_blob = json.load(open(schema_path, "r"))
for db in dbs_json_blob:
db_id = db['db_id']
column_id_to_table = {}
column_id_to_column = {}
for i, (column, text, column_type) in enumerate(zip(db['column_names_original'],
db['column_names'],
db['column_types'])):
table_id, column_name = column
_, column_text = text
table_name = db['table_names_original'][table_id]
if table_name not in schemas[db_id]:
table_text = db['table_names'][table_id]
table_obj = Table(table_name, table_text, [])
schemas[db_id][table_name] = table_obj
table_obj = schemas[db_id][table_name]
if column_name == "*":
                # TODO: we cannot add an extra command to handle the `*` problem.
                # For now we use a special embedding for linking `*` and predicting actions.
is_primary_key = False
else:
is_primary_key = i in db['primary_keys']
# allocate new column object
column_obj = TableColumn(column_name.lower(), column_text, column_type,
is_primary_key, table_obj, None)
schemas[db_id][table_name].columns.append(column_obj)
column_id_to_column[i] = column_obj
for (c1, c2) in db['foreign_keys']:
foreign_key = column_id_to_column[c2].refer_table.name + ':' + column_id_to_column[c2].name
            # TODO: we allow multiple foreign keys so that shortcut joins are possible
if column_id_to_column[c1].foreign_key is None:
column_id_to_column[c1].foreign_key = []
column_id_to_column[c1].foreign_key.append(foreign_key)
for i, table_name in enumerate(db['table_names_original']):
column_id_to_table[i] = schemas[db_id][table_name]
# assign id to column and id to table
schema_id_to_table[db_id] = column_id_to_table
schema_id_to_col[db_id] = column_id_to_column
return {**schemas}, {**schema_id_to_col}, {**schema_id_to_table}
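# A sketch of the returned structures, assuming a database id "concert_singer"
# with a table "singer" (the names are illustrative only):
#
#     schemas, id2col, id2table = read_dataset_schema("tables.json")
#     table = schemas["concert_singer"]["singer"]    # Table object
#     [c.name for c in table.columns]                # its column names
#     id2col["concert_singer"][3].refer_table.name   # column id -> owning table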
def read_dataset_values(db_id: str, database_path: str, tables: List) -> Dict:
db = os.path.join(database_path, db_id, db_id + ".sqlite")
values = {}
if not os.path.exists(db):
# try to read it using ".json" suffix
# assume the table is constrained on single-table
table = tables[0]
db = os.path.join(database_path, db_id, db_id + ".json")
values[table] = json.load(open(db, "r", encoding="utf8"))["rows"]
else:
try:
conn = sqlite3.connect(db)
except Exception as e:
raise Exception(f"Can't connect to SQL: {e} in path {db}")
conn.text_factory = str
cursor = conn.cursor()
for table in tables:
try:
cursor.execute(f"SELECT * FROM {table.name} LIMIT 5000")
values[table] = cursor.fetchall()
            except Exception:
conn.text_factory = lambda x: str(x, 'latin1')
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM {table.name} LIMIT 5000")
values[table] = cursor.fetchall()
return values
def ent_key_to_name(key):
parts = key.split(':')
if parts[0] == 'table':
return parts[1]
elif parts[0] == 'column':
_, _, table_name, column_name = parts
return f'{table_name}@{column_name}'
else:
return parts[1]
| ContextualSP/semantic_parsing_in_context/context/utils.py/0 | {
"file_path": "ContextualSP/semantic_parsing_in_context/context/utils.py",
"repo_id": "ContextualSP",
"token_count": 2508
} | 266 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Dict, Generic, List, TypeVar
from allennlp.nn import util
from constant import SpecialSymbol
ActionRepresentation = TypeVar('ActionRepresentation') # pylint: disable=invalid-name
class GrammarStatelet(Generic[ActionRepresentation]):
"""
A ``GrammarStatelet`` keeps track of the currently valid actions at every step of decoding.
This class is relatively simple: we have a non-terminal stack which tracks which non-terminals
we still need to expand. At every timestep of decoding, we take an action that pops something
off of the non-terminal stack, and possibly pushes more things on. The grammar state is
"finished" when the non-terminal stack is empty.
At any point during decoding, you can query this object to get a representation of all of the
valid actions in the current state. The representation is something that you provide when
constructing the initial state, in whatever form you want, and we just hold on to it for you
and return it when you ask. Putting this in here is purely for convenience, to group together
pieces of state that are related to taking actions - if you want to handle the action
representations outside of this class, that would work just fine too.
Parameters
----------
nonterminal_stack : ``List[str]``
Holds the list of non-terminals that still need to be expanded. This starts out as
[START_SYMBOL], and decoding ends when this is empty. Every time we take an action, we
update the non-terminal stack and the context-dependent valid actions, and we use what's on
the stack to decide which actions are valid in the current state.
valid_actions : ``Dict[str, ActionRepresentation]``
A mapping from non-terminals (represented as strings) to all valid expansions of that
non-terminal. The class that constructs this object can pick how it wants the actions to
be represented.
is_nonterminal : ``Callable[[str], bool]``
A function that is used to determine whether each piece of the RHS of the action string is
a non-terminal that needs to be added to the non-terminal stack. You can use
``type_declaraction.is_nonterminal`` here, or write your own function if that one doesn't
work for your domain.
reverse_productions: ``bool``, optional (default=True)
A flag that reverses the production rules when ``True``. If the production rules are
reversed, then the first non-terminal in the production will be popped off the stack first,
giving us left-to-right production. If this is ``False``, you will get right-to-left
production.
"""
def __init__(self,
nonterminal_stack: List[str],
valid_actions: Dict[str, ActionRepresentation],
is_nonterminal: Callable[[str], bool],
reverse_productions: bool = True) -> None:
self._nonterminal_stack = nonterminal_stack
self._valid_actions = valid_actions
self._is_nonterminal = is_nonterminal
self._reverse_productions = reverse_productions
def is_finished(self) -> bool:
"""
Have we finished producing our logical form? We have finished producing the logical form
if and only if there are no more non-terminals on the stack.
"""
return not self._nonterminal_stack
def get_valid_actions(self) -> ActionRepresentation:
"""
Returns the valid actions in the current grammar state. The `Model` determines what
exactly this looks like when it constructs the `valid_actions` dictionary.
"""
return self._valid_actions[self._nonterminal_stack[-1]]
def take_action(self, production_rule: str) -> 'GrammarStatelet':
"""
Takes an action in the current grammar state, returning a new grammar state with whatever
updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS".
This will update the non-terminal stack. Updating the non-terminal stack involves popping
the non-terminal that was expanded off of the stack, then pushing on any non-terminals in
the production rule back on the stack.
For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and
``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e",
"<e,d>"]``.
        If ``self._reverse_productions`` is set to ``False`` then we push the non-terminals on
        in their given order, which means that the first non-terminal in the production rule gets
popped off the stack `last`.
"""
special_str = SpecialSymbol.copy_delimiter
if special_str in production_rule:
# segment-level copy, remove the first copy
production_rule = production_rule.replace(special_str, '', 1)
if special_str in production_rule:
first_production_rule = production_rule[:production_rule.find(special_str)].strip()
left_side, _ = first_production_rule.split(' -> ')
last_production_rule = production_rule[production_rule.rfind(special_str) + len(special_str):].strip()
_, right_side = last_production_rule.split(' -> ')
else:
left_side, right_side = production_rule.split(' -> ')
else:
left_side, right_side = production_rule.split(' -> ')
assert self._nonterminal_stack[-1] == left_side, (f"Tried to expand {self._nonterminal_stack[-1]}"
f"but got rule {left_side} -> {right_side}")
new_stack = self._nonterminal_stack[:-1]
productions = self._get_productions_from_string(right_side)
if self._reverse_productions:
productions = list(reversed(productions))
for production in productions:
if self._is_nonterminal(production):
new_stack.append(production)
return GrammarStatelet(nonterminal_stack=new_stack,
valid_actions=self._valid_actions,
is_nonterminal=self._is_nonterminal,
reverse_productions=self._reverse_productions)
@staticmethod
def _get_productions_from_string(production_string: str) -> List[str]:
"""
Takes a string like 'Select Order' and parses it into a list like ['Select', 'Order']
"""
return production_string.split(" ")
def __eq__(self, other):
if isinstance(self, other.__class__):
# pylint: disable=protected-access
return all([
self._nonterminal_stack == other._nonterminal_stack,
util.tensors_equal(self._valid_actions, other._valid_actions),
self._is_nonterminal == other._is_nonterminal,
self._reverse_productions == other._reverse_productions,
])
return NotImplemented
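# A walk-through of the stack update, assuming space-separated "LHS -> RHS"
# actions (the grammar symbols below are illustrative):
#
#     state = GrammarStatelet(['statement'], valid_actions, is_nonterminal)
#     state = state.take_action('statement -> Query')
#     state = state.take_action('Query -> Select Order')
#     # With reverse_productions=True the stack is now ['Order', 'Select'],
#     # so 'Select' is expanded first (left-to-right production).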
| ContextualSP/semantic_parsing_in_context/models/states_machine/grammar_state_let.py/0 | {
"file_path": "ContextualSP/semantic_parsing_in_context/models/states_machine/grammar_state_let.py",
"repo_id": "ContextualSP",
"token_count": 2656
} | 267 |
# Copyright (c) Facebook, Inc. and Microsoft Corporation.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import html
import re
import xml.etree.ElementTree as ET
from collections import defaultdict
from urllib.parse import unquote
import bs4
from bs4 import BeautifulSoup
from genre.entity_linking import (
get_end_to_end_prefix_allowed_tokens_fn_fairseq,
get_end_to_end_prefix_allowed_tokens_fn_hf,
)
def chunk_it(seq, num):
assert num > 0
chunk_len = len(seq) // num
chunks = [seq[i * chunk_len : i * chunk_len + chunk_len] for i in range(num)]
diff = len(seq) - chunk_len * num
for i in range(diff):
chunks[i].append(seq[chunk_len * num + i])
return chunks
def batch_it(seq, num=1):
out = []
for item in seq:
if len(out) == num:
yield out
out = []
out.append(item)
if len(out):
yield out
def create_input(doc, max_length, start_delimiter, end_delimiter):
if "meta" in doc and all(
e in doc["meta"] for e in ("left_context", "mention", "right_context")
):
if len(doc["input"].split(" ")) <= max_length:
input_ = (
doc["meta"]["left_context"]
+ " {} ".format(start_delimiter)
+ doc["meta"]["mention"]
+ " {} ".format(end_delimiter)
+ doc["meta"]["right_context"]
)
elif len(doc["meta"]["left_context"].split(" ")) <= max_length // 2:
input_ = (
doc["meta"]["left_context"]
+ " {} ".format(start_delimiter)
+ doc["meta"]["mention"]
+ " {} ".format(end_delimiter)
+ " ".join(
doc["meta"]["right_context"].split(" ")[
: max_length - len(doc["meta"]["left_context"].split(" "))
]
)
)
elif len(doc["meta"]["right_context"].split(" ")) <= max_length // 2:
input_ = (
" ".join(
doc["meta"]["left_context"].split(" ")[
len(doc["meta"]["right_context"].split(" ")) - max_length :
]
)
+ " {} ".format(start_delimiter)
+ doc["meta"]["mention"]
+ " {} ".format(end_delimiter)
+ doc["meta"]["right_context"]
)
else:
input_ = (
" ".join(doc["meta"]["left_context"].split(" ")[-max_length // 2 :])
+ " {} ".format(start_delimiter)
+ doc["meta"]["mention"]
+ " {} ".format(end_delimiter)
+ " ".join(doc["meta"]["right_context"].split(" ")[: max_length // 2])
)
else:
input_ = doc["input"]
input_ = html.unescape(input_)
return input_
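# In short: if the whole input fits in `max_length` tokens it is used as-is;
# otherwise the window keeps whichever context (left or right) fits in half
# the budget and truncates the other side, falling back to max_length // 2
# tokens on each side of the delimited mention.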
def get_entity_spans_pre_processing(sentences):
return [
(
" {} ".format(sent)
.replace("\xa0", " ")
.replace("{", "(")
.replace("}", ")")
.replace("[", "(")
.replace("]", ")")
)
for sent in sentences
]
def get_entity_spans_post_processing(sentences):
outputs = []
for sent in sentences:
sent = re.sub(r"{.*?", "{ ", sent)
sent = re.sub(r"}.*?", "} ", sent)
sent = re.sub(r"\].*?", "] ", sent)
sent = re.sub(r"\[.*?", "[ ", sent)
sent = re.sub(r"\s{2,}", " ", sent)
sent = re.sub(r"\. \. \} \[ (.*?) \]", r". } [ \1 ] .", sent)
sent = re.sub(r"\, \} \[ (.*?) \]", r" } [ \1 ] ,", sent)
sent = re.sub(r"\; \} \[ (.*?) \]", r" } [ \1 ] ;", sent)
sent = sent.replace("{ ", "{").replace(" } [ ", "}[").replace(" ]", "]")
outputs.append(sent)
return outputs
def _get_entity_spans(
model,
input_sentences,
prefix_allowed_tokens_fn,
redirections=None,
):
output_sentences = model.sample(
get_entity_spans_pre_processing(input_sentences),
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
)
output_sentences = get_entity_spans_post_processing(
[e[0]["text"] for e in output_sentences]
)
return get_entity_spans_finalize(
input_sentences, output_sentences, redirections=redirections
)
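# End-to-end flow: normalise the input (pre-processing), decode with a
# constrained prefix function, clean the generated markup (post-processing),
# then align the output against the input to recover (start, length, entity)
# triples via get_entity_spans_finalize.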
def get_entity_spans_fairseq(
model,
input_sentences,
mention_trie=None,
candidates_trie=None,
mention_to_candidates_dict=None,
redirections=None,
):
return _get_entity_spans(
model,
input_sentences,
prefix_allowed_tokens_fn=get_end_to_end_prefix_allowed_tokens_fn_fairseq(
model,
get_entity_spans_pre_processing(input_sentences),
mention_trie=mention_trie,
candidates_trie=candidates_trie,
mention_to_candidates_dict=mention_to_candidates_dict,
),
redirections=redirections,
)
def get_entity_spans_hf(
model,
input_sentences,
mention_trie=None,
candidates_trie=None,
mention_to_candidates_dict=None,
redirections=None,
):
return _get_entity_spans(
model,
input_sentences,
prefix_allowed_tokens_fn=get_end_to_end_prefix_allowed_tokens_fn_hf(
model,
get_entity_spans_pre_processing(input_sentences),
mention_trie=mention_trie,
candidates_trie=candidates_trie,
mention_to_candidates_dict=mention_to_candidates_dict,
),
redirections=redirections,
)
def get_entity_spans_finalize(input_sentences, output_sentences, redirections=None):
return_outputs = []
for input_, output_ in zip(input_sentences, output_sentences):
input_ = input_.replace("\xa0", " ") + " -"
output_ = output_.replace("\xa0", " ") + " -"
entities = []
status = "o"
i = 0
j = 0
while j < len(output_) and i < len(input_):
if status == "o":
if input_[i] == output_[j] or (
output_[j] in "()" and input_[i] in "[]{}"
):
i += 1
j += 1
elif output_[j] == " ":
j += 1
elif input_[i] == " ":
i += 1
elif output_[j] == "{":
entities.append([i, 0, ""])
j += 1
status = "m"
else:
raise RuntimeError
elif status == "m":
if input_[i] == output_[j]:
i += 1
j += 1
entities[-1][1] += 1
elif output_[j] == " ":
j += 1
elif input_[i] == " ":
i += 1
elif output_[j] == "}":
j += 1
status = "e"
else:
raise RuntimeError
elif status == "e":
if output_[j] == "[":
j += 1
elif output_[j] != "]":
entities[-1][2] += output_[j]
j += 1
elif output_[j] == "]":
entities[-1][2] = entities[-1][2].replace(" ", "_")
if len(entities[-1][2]) <= 1:
del entities[-1]
elif entities[-1][2] == "NIL":
del entities[-1]
elif redirections is not None and entities[-1][2] in redirections:
entities[-1][2] = redirections[entities[-1][2]]
if len(entities) > 0:
entities[-1] = tuple(entities[-1])
status = "o"
j += 1
else:
raise RuntimeError
return_outputs.append(entities)
return return_outputs
def get_markdown(sentences, entity_spans):
return_outputs = []
for sent, entities in zip(sentences, entity_spans):
text = ""
last_end = 0
for begin, length, href in entities:
text += sent[last_end:begin]
text += "[{}](https://en.wikipedia.org/wiki/{})".format(
sent[begin : begin + length], href
)
last_end = begin + length
text += sent[last_end:]
return_outputs.append(text)
return return_outputs
def strong_tp(guess_entities, gold_entities):
return len(gold_entities.intersection(guess_entities))
def weak_tp(guess_entities, gold_entities):
tp = 0
for pred in guess_entities:
for gold in gold_entities:
if (
pred[0] == gold[0]
and (
gold[1] <= pred[1] <= gold[1] + gold[2]
or gold[1] <= pred[1] + pred[2] <= gold[1] + gold[2]
)
and pred[3] == gold[3]
):
tp += 1
return tp
def get_micro_precision(guess_entities, gold_entities, mode="strong"):
guess_entities = set(guess_entities)
gold_entities = set(gold_entities)
if mode == "strong":
return (
(strong_tp(guess_entities, gold_entities) / len(guess_entities))
if len(guess_entities)
else 0
)
elif mode == "weak":
return (
(weak_tp(guess_entities, gold_entities) / len(guess_entities))
if len(guess_entities)
else 0
)
def get_micro_recall(guess_entities, gold_entities, mode="strong"):
guess_entities = set(guess_entities)
gold_entities = set(gold_entities)
if mode == "strong":
return (
(strong_tp(guess_entities, gold_entities) / len(gold_entities))
if len(gold_entities)
else 0
)
elif mode == "weak":
return (
(weak_tp(guess_entities, gold_entities) / len(gold_entities))
if len(gold_entities)
else 0
)
def get_micro_f1(guess_entities, gold_entities, mode="strong"):
precision = get_micro_precision(guess_entities, gold_entities, mode)
recall = get_micro_recall(guess_entities, gold_entities, mode)
return (
(2 * (precision * recall) / (precision + recall)) if precision + recall else 0
)
def get_doc_level_guess_gold_entities(guess_entities, gold_entities):
new_guess_entities = defaultdict(list)
for e in guess_entities:
new_guess_entities[e[0]].append(e)
new_gold_entities = defaultdict(list)
for e in gold_entities:
new_gold_entities[e[0]].append(e)
return new_guess_entities, new_gold_entities
def get_macro_precision(guess_entities, gold_entities, mode="strong"):
guess_entities, gold_entities = get_doc_level_guess_gold_entities(
guess_entities, gold_entities
)
all_scores = [
get_micro_precision(guess_entities[k], gold_entities[k], mode)
for k in guess_entities
]
return (sum(all_scores) / len(all_scores)) if len(all_scores) else 0
def get_macro_recall(guess_entities, gold_entities, mode="strong"):
guess_entities, gold_entities = get_doc_level_guess_gold_entities(
guess_entities, gold_entities
)
all_scores = [
get_micro_recall(guess_entities[k], gold_entities[k], mode)
for k in guess_entities
]
return (sum(all_scores) / len(all_scores)) if len(all_scores) else 0
def get_macro_f1(guess_entities, gold_entities, mode="strong"):
guess_entities, gold_entities = get_doc_level_guess_gold_entities(
guess_entities, gold_entities
)
all_scores = [
get_micro_f1(guess_entities[k], gold_entities[k], mode) for k in guess_entities
]
return (sum(all_scores) / len(all_scores)) if len(all_scores) else 0
def extract_pages(filename):
docs = {}
with open(filename) as f:
for line in f:
# CASE 1: beginning of the document
if line.startswith("<doc id="):
doc = ET.fromstring("{}{}".format(line, "</doc>")).attrib
doc["paragraphs"] = []
doc["anchors"] = []
# CASE 2: end of the document
elif line.startswith("</doc>"):
assert doc["id"] not in docs, "{} ({}) already in dict as {}".format(
doc["id"], doc["title"], docs[doc["id"]]["title"]
)
docs[doc["id"]] = doc
# CASE 3: in the document
else:
doc["paragraphs"].append("")
try:
line = BeautifulSoup(line, "html.parser")
            except Exception:
print("error line `{}`".format(line))
line = [line]
for span in line:
if isinstance(span, bs4.element.Tag):
if span.get("href", None):
doc["anchors"].append(
{
"text": span.get_text(),
"href": span["href"],
"paragraph_id": len(doc["paragraphs"]) - 1,
"start": len(doc["paragraphs"][-1]),
"end": len(doc["paragraphs"][-1])
+ len(span.get_text()),
}
)
doc["paragraphs"][-1] += span.get_text()
else:
doc["paragraphs"][-1] += str(span)
return docs
def search_simple(anchor, lang, lang_title2wikidataID):
if "http" in anchor:
return True, []
unquoted = unquote(anchor).split("#")[0].replace("_", " ")
if unquoted == "":
return True, []
unquoted = unquoted[0].upper() + unquoted[1:]
if (lang, unquoted) in lang_title2wikidataID:
return True, lang_title2wikidataID[(lang, unquoted)]
else:
return False, unquoted
def search_wikipedia(title, lang, lang_title2wikidataID, lang_redirect2title):
max_redirects = 10
while (lang, title) in lang_redirect2title and max_redirects > 0:
title = lang_redirect2title[(lang, title)]
max_redirects -= 1
if (lang, title) in lang_title2wikidataID:
return True, lang_title2wikidataID[(lang, title)]
else:
return False, title
def search_wikidata(query, label_alias2wikidataID):
return list(set(label_alias2wikidataID.get(query.lower(), [])))
def get_wikidata_ids(
anchor,
lang,
lang_title2wikidataID,
lang_redirect2title,
label_or_alias2wikidataID,
):
success, result = search_simple(anchor, lang, label_or_alias2wikidataID)
if success:
return result, "simple"
else:
success, result = search_wikipedia(
result, lang, lang_title2wikidataID, lang_redirect2title
)
if success:
return result, "wikipedia"
else:
return search_wikidata(result, label_or_alias2wikidataID), "wikidata"
tr2016_langs = ["ar", "de", "es", "fr", "he", "it", "ta", "th", "tl", "tr", "ur", "zh"]
news_langs = [
"ar",
"bg",
"bs",
"ca",
"cs",
"de",
"el",
"en",
"eo",
"es",
"fa",
"fi",
"fr",
"he",
"hu",
"it",
"ja",
"ko",
"nl",
"no",
"pl",
"pt",
"ro",
"ru",
"sd",
"sq",
"sr",
"sv",
"ta",
"th",
"tr",
"uk",
"zh",
]
mewsli_langs = ["ar", "de", "en", "es", "fa", "ja", "sr", "ta", "tr"]
mbart25_langs = [
"ar",
"cs",
"de",
"en",
"es",
"et",
"fi",
"fr",
"gu",
"hi",
"it",
"ja",
"kk",
"ko",
"lt",
"lv",
"my",
"ne",
"nl",
"ro",
"ru",
"si",
"tr",
"vi",
"zh",
]
mbart100_langs = [
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bm",
"bn",
"br",
"bs",
"ca",
"cb",
"ci",
"cs",
"cx",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"ff",
"fi",
"fr",
"fy",
"ga",
"gd",
"gl",
"gn",
"gu",
"ha",
"he",
"hi",
"hr",
"ht",
"hu",
"hy",
"id",
"ig",
"is",
"it",
"ja",
"jv",
"ka",
"kg",
"kk",
"km",
"kn",
"ko",
"ku",
"ky",
"la",
"lg",
"ln",
"lo",
"lt",
"lv",
"mg",
"mk",
"ml",
"mn",
"mr",
"ms",
"my",
"ne",
"nl",
"no",
"ns",
"om",
"or",
"pa",
"pl",
"ps",
"pt",
"q2",
"q3",
"qa",
"qd",
"qf",
"qh",
"qi",
"qj",
"ql",
"qm",
"qp",
"qq",
"qu",
"qw",
"qx",
"qy",
"ro",
"ru",
"sa",
"sd",
"si",
"sk",
"sl",
"so",
"sq",
"sr",
"ss",
"su",
"sv",
"sw",
"ta",
"te",
"th",
"ti",
"tl",
"tn",
"tr",
"uk",
"ur",
"uz",
"vi",
"wo",
"xh",
"yo",
"zh",
"zu",
]
el100_langs = [
"af",
"an",
"ar",
"ar",
"ast",
"az",
"azb",
"ba",
"bar",
"be",
"bg",
"bn",
"bpy",
"br",
"bs",
"ca",
"ce",
"ceb",
"cs",
"cv",
"cy",
"da",
"de",
"el",
"en",
"es",
"et",
"eu",
"fa",
"fi",
"fil",
"fr",
"fy",
"ga",
"gl",
"gu",
"hi",
"hr",
"ht",
"hu",
"hy",
"id",
"io",
"is",
"it",
"iw",
"ja",
"jv",
"ka",
"kk",
"kn",
"ko",
"ky",
"la",
"lah",
"lb",
"lmo",
"lt",
"lv",
"mg",
"min",
"mk",
"ml",
"mn",
"mr",
"ms",
"my",
"nds",
"ne",
"new",
"nl",
"nn",
"no",
"oc",
"pa",
"pl",
"pms",
"pt",
"ro",
"ru",
"scn",
"sco",
"sk",
"sl",
"sq",
"sr",
"sr-Latn",
"su",
"sv",
"sw",
"ta",
"te",
"tg",
"th",
"tr",
"tt",
"uk",
"ur",
"uz",
"vi",
"vo",
"yo",
"zh",
"zh-TW",
]
wiki_langs = [
"aa",
"ab",
"ace",
"ady",
"af",
"ak",
"als",
"am",
"an",
"ang",
"ar",
"arc",
"arz",
"as",
"ast",
"atj",
"av",
"ay",
"az",
"azb",
"ba",
"bar",
"bcl",
"be",
"bg",
"bh",
"bi",
"bjn",
"bm",
"bn",
"bo",
"bpy",
"br",
"bs",
"bug",
"bxr",
"ca",
"cdo",
"ce",
"ceb",
"ch",
"cho",
"chr",
"chy",
"ckb",
"co",
"cr",
"crh",
"cs",
"csb",
"cu",
"cv",
"cy",
"da",
"de",
"din",
"diq",
"dsb",
"dty",
"dv",
"dz",
"ee",
"el",
"eml",
"en",
"eo",
"es",
"et",
"eu",
"ext",
"fa",
"ff",
"fi",
"fj",
"fo",
"fr",
"frp",
"frr",
"fur",
"fy",
"ga",
"gag",
"gan",
"gd",
"gl",
"glk",
"gn",
"gom",
"gor",
"got",
"gu",
"gv",
"ha",
"hak",
"haw",
"he",
"hi",
"hif",
"ho",
"hr",
"hsb",
"ht",
"hu",
"hy",
"hyw",
"hz",
"ia",
"id",
"ie",
"ig",
"ii",
"ik",
"ilo",
"inh",
"io",
"is",
"it",
"iu",
"ja",
"jam",
"jbo",
"jv",
"ka",
"kaa",
"kab",
"kbd",
"kbp",
"kg",
"ki",
"kj",
"kk",
"kl",
"km",
"kn",
"ko",
"koi",
"kr",
"krc",
"ks",
"ksh",
"ku",
"kv",
"kw",
"ky",
"la",
"lad",
"lb",
"lbe",
"lez",
"lfn",
"lg",
"li",
"lij",
"lmo",
"ln",
"lo",
"lrc",
"lt",
"ltg",
"lv",
"mai",
"mdf",
"mg",
"mh",
"mhr",
"mi",
"min",
"mk",
"ml",
"mn",
"mr",
"mrj",
"ms",
"mt",
"mus",
"mwl",
"my",
"myv",
"mzn",
"na",
"nah",
"nap",
"nds",
"ne",
"new",
"ng",
"nl",
"nn",
"no",
"nov",
"nqo",
"nrm",
"nso",
"nv",
"ny",
"oc",
"olo",
"om",
"or",
"os",
"pa",
"pag",
"pam",
"pap",
"pcd",
"pdc",
"pfl",
"pi",
"pih",
"pl",
"pms",
"pnb",
"pnt",
"ps",
"pt",
"qu",
"rm",
"rmy",
"rn",
"ro",
"ru",
"rue",
"rw",
"sa",
"sah",
"sat",
"sc",
"scn",
"sco",
"sd",
"se",
"sg",
"sh",
"shn",
"si",
"simple",
"sk",
"sl",
"sm",
"sn",
"so",
"sq",
"sr",
"srn",
"ss",
"st",
"stq",
"su",
"sv",
"sw",
"szl",
"ta",
"tcy",
"te",
"tet",
"tg",
"th",
"ti",
"tk",
"tl",
"tn",
"to",
"tpi",
"tr",
"ts",
"tt",
"tum",
"tw",
"ty",
"tyv",
"udm",
"ug",
"uk",
"ur",
"uz",
"ve",
"vec",
"vep",
"vi",
"vls",
"vo",
"wa",
"war",
"wo",
"wuu",
"xal",
"xh",
"xmf",
"yi",
"yo",
"za",
"zea",
"zh",
]
our105_langs = sorted(set(mbart100_langs).intersection(set(wiki_langs)))
our105_langs_to_name = {
"af": "Afrikaans",
"sq": "Albanian",
"am": "Amharic",
"ar": "Arabic",
"hy": "Armenian",
"as": "Assamese",
"az": "Azerbaijani",
"bm": "Bambara",
"eu": "Basque",
"be": "Belarusian",
"bn": "Bengali",
"bs": "Bosnian",
"br": "Breton",
"bg": "Bulgarian",
"my": "Burmese",
"ca": "Catalan",
"zh": "Chinese",
"hr": "Croatian",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"eo": "Esperanto",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"ff": "Fulah",
"gl": "Galician",
"ka": "Georgian",
"de": "German",
"el": "Greek",
"gn": "Guarani",
"gu": "Gujarati",
"ht": "Haitian",
"ha": "Hausa",
"he": "Hebrew",
"hi": "Hindi",
"hu": "Hungarian",
"id": "Indonesian",
"ga": "Irish",
"ig": "Igbo",
"is": "Icelandic",
"it": "Italian",
"ja": "Japanese",
"jv": "Javanese",
"kn": "Kannada",
"kk": "Kazakh",
"km": "Khmer",
"ky": "Kyrgyz",
"kg": "Kongo",
"ko": "Korean",
"ku": "Kurdish",
"la": "Latin",
"lg": "Ganda",
"ln": "Lingala",
"lo": "Lao",
"lt": "Lithuanian",
"lv": "Latvian",
"mk": "Macedonian",
"mg": "Malagasy",
"ms": "Malay",
"ml": "Malayalam",
"mr": "Marathi",
"mn": "Mongolian",
"ne": "Nepali",
"no": "Norwegian",
"om": "Oromo",
"or": "Oriya",
"pa": "Panjabi",
"fa": "Persian",
"pl": "Polish",
"ps": "Pashto",
"pt": "Portuguese",
"qu": "Quechua",
"ro": "Romanian",
"ru": "Russian",
"sa": "Sanskrit",
"sd": "Sindhi",
"sr": "Serbian",
"gd": "Gaelic,",
"si": "Sinhala",
"sk": "Slovak",
"sl": "Slovenian",
"so": "Somali",
"es": "Spanish",
"su": "Sundanese",
"sw": "Swahili",
"ss": "Swati",
"sv": "Swedish",
"ta": "Tamil",
"te": "Telugu",
"th": "Thai",
"ti": "Tigrinya",
"tl": "Tagalog",
"tn": "Tswana",
"tr": "Turkish",
"uk": "Ukrainian",
"ur": "Urdu",
"uz": "Uzbek",
"vi": "Vietnamese",
"cy": "Welsh",
"wo": "Wolof",
"fy": "Frysk",
"xh": "Xhosa",
"yo": "Yoruba",
}
| ContextualSP/unified_parser_text_to_sql/genre/utils.py/0 | {
"file_path": "ContextualSP/unified_parser_text_to_sql/genre/utils.py",
"repo_id": "ContextualSP",
"token_count": 13574
} | 268 |
import argparse
import json
import re
import subprocess
from collections import defaultdict
from re import RegexFlag
import networkx as nx
import torch
from genre.fairseq_model import GENRE, mGENRE
from genre.entity_linking import get_end_to_end_prefix_allowed_tokens_fn_fairseq as get_prefix_allowed_tokens_fn
from genre.trie import Trie
from semparse.sql.spider import load_original_schemas, load_tables
from semparse.worlds.evaluate_spider import evaluate as evaluate_sql
from step1_schema_linking import read_database_schema
database_dir='./data/spider/database'
database_schema_filename = './data/spider/tables.json'
schema_tokens, column_names, database_schemas = read_database_schema(database_schema_filename)
with open(f'./data/spider/dev.json', 'r', encoding='utf-8') as f:
item = json.load(f)
sql_to_db = []
for i in item:
sql_to_db.append(i['db_id'])
def post_processing_sql(p_sql, foreign_key_maps, schemas, o_schemas):
foreign_key = {}
for k, v in foreign_key_maps.items():
if k == v:
continue
key = ' '.join(sorted([k.split('.')[0].strip('_'), v.split('.')[0].strip('_')]))
foreign_key[key] = (k.strip('_').replace('.', '@'), v.strip('_').replace('.', '@'))
primary_key = {}
for t in o_schemas.tables:
table = t.orig_name.lower()
if len(t.primary_keys) == 0:
continue
column = t.primary_keys[0].orig_name.lower()
primary_key[table] = f'{table}@{column}'
p_sql = re.sub(r'(=)(\S+)', r'\1 \2', p_sql)
p_sql = p_sql.split()
columns = ['*']
tables = []
for table, column_list in schemas.schema.items():
for column in column_list:
columns.append(f"{table}@{column}")
tables.append(table)
# infer table from mentioned column
all_from_table_ids = set()
from_idx = where_idx = group_idx = order_idx = -1
for idx, token in enumerate(p_sql):
if '@' in token and token in columns:
all_from_table_ids.add(schemas.idMap[token.split('@')[0]])
if token == 'from' and from_idx == -1:
from_idx = idx
if token == 'where' and where_idx == -1:
where_idx = idx
if token == 'group' and group_idx == -1:
group_idx = idx
if token == 'order' and order_idx == -1:
order_idx = idx
    # Don't process nested SQL (more than one SELECT).
if len(re.findall('select', ' '.join(p_sql))) > 1 or len(all_from_table_ids) == 0:
return ' '.join(p_sql)
covered_tables = set()
candidate_table_ids = sorted(all_from_table_ids)
start_table_id = candidate_table_ids[0]
conds = set()
all_conds = []
for table_id in candidate_table_ids[1:]:
if table_id in covered_tables:
continue
try:
path = nx.shortest_path(
o_schemas.foreign_key_graph,
source=start_table_id,
target=table_id,
)
except (nx.NetworkXNoPath, nx.NodeNotFound):
covered_tables.add(table_id)
continue
for source_table_id, target_table_id in zip(path, path[1:]):
if target_table_id in covered_tables:
continue
covered_tables.add(target_table_id)
all_from_table_ids.add(target_table_id)
col1, col2 = o_schemas.foreign_key_graph[source_table_id][target_table_id]["columns"]
all_conds.append((columns[col1], columns[col2]))
conds.add((tables[source_table_id],
tables[target_table_id],
columns[col1],
columns[col2]))
all_from_table_ids = list(all_from_table_ids)
try:
tokens = ["from", tables[all_from_table_ids[0]]]
for i, table_id in enumerate(all_from_table_ids[1:]):
tokens += ["join"]
tokens += [tables[table_id]]
tokens += ["on", all_conds[i][0], "=", all_conds[i][1]]
except:
return ' '.join(p_sql)
if where_idx != -1:
p_sql = p_sql[:from_idx] + tokens + p_sql[where_idx:]
elif group_idx != -1:
p_sql = p_sql[:from_idx] + tokens + p_sql[group_idx:]
elif order_idx != -1:
p_sql = p_sql[:from_idx] + tokens + p_sql[order_idx:]
elif len(p_sql[:from_idx] + p_sql[from_idx:]) == len(p_sql):
p_sql = p_sql[:from_idx] + tokens
return ' '.join(p_sql)
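# Illustrative repair under an assumed schema with tables `singer` and
# `concert` linked by a foreign key: a prediction that mentions concert@year
# but omits the join, e.g.
#   select singer@name from singer where concert@year = 2014
# has the missing table inferred from the foreign-key graph and rewritten as
#   select singer@name from singer join concert on ... where concert@year = 2014
# (all table/column names above are assumptions for illustration).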
def extract_structure_data(plain_text_content: str):
def sort_by_id(data):
data.sort(key=lambda x: int(x.split('\t')[0][2:]))
return data
data = []
original_schemas = load_original_schemas(database_schema_filename)
schemas, eval_foreign_key_maps = load_tables(database_schema_filename)
predict_outputs = sort_by_id(re.findall("^D.+", plain_text_content, RegexFlag.MULTILINE))
ground_outputs = sort_by_id(re.findall("^T.+", plain_text_content, RegexFlag.MULTILINE))
source_inputs = sort_by_id(re.findall("^S.+", plain_text_content, RegexFlag.MULTILINE))
for idx, (predict, ground, source) in enumerate(zip(predict_outputs, ground_outputs, source_inputs)):
predict_id, predict_score, predict_clean = predict.split('\t')
ground_id, ground_clean = ground.split('\t')
source_id, source_clean = source.split('\t')
db_id = sql_to_db[idx]
        # Try to post-process the incomplete SQL by:
        # (1) correcting the COLUMN in the ON clause based on the foreign-key graph
        # (2) adding the underlying TABLE by searching the shortest path
predict_clean = post_processing_sql(predict_clean, eval_foreign_key_maps[db_id], original_schemas[db_id],
schemas[db_id])
data.append((predict_id[2:], source_clean.split('<Q>')[-1].strip(), ground_clean, predict_clean, db_id))
return data
def evaluate(data):
def evaluate_example(_predict_str: str, _ground_str: str):
return re.sub("\s+", "", _predict_str.lower()) == re.sub("\s+", "", _ground_str.lower())
correct_num = 0
correct_tag_list = []
total = 0
tmp = []
for example in data:
idx, source_str, ground_str, predict_str, db_id = example
total += 1
try:
sql_match = evaluate_sql(gold=ground_str.replace('@', '.'),
predict=predict_str.replace('@', '.'),
db_name=db_id,
db_dir=database_dir,
table=database_schema_filename)
        except Exception:
print(predict_str)
sql_match = False
if (sql_match or evaluate_example(predict_str, ground_str)):
is_correct = True
correct_num += 1
else:
is_correct = False
tmp.append(is_correct)
correct_tag_list.append(is_correct)
print("Correct/Total : {}/{}, {:.4f}".format(correct_num, total, correct_num / total))
return correct_tag_list, correct_num, total
def predict_and_evaluate(model_path, dataset_path, constrain):
if constrain:
data = predict_with_constrain(
model_path=model_path,
dataset_path=dataset_path
)
else:
decode_without_constrain(
model_path=model_path,
dataset_path=dataset_path
)
with open('./eval/generate-valid.txt', "r", encoding="utf8") as generate_f:
file_content = generate_f.read()
data = extract_structure_data(file_content)
correct_arr, correct_num, total = evaluate(data)
with open('./eval/spider_eval.txt', "w", encoding="utf8") as eval_file:
for example, correct in zip(data, correct_arr):
eval_file.write(str(correct) + "\n" + "\n".join(
[example[0], "db: " + example[-1], example[1], "gold: " + example[2], "pred: " + example[3]]) + "\n\n")
return correct_num, total
def get_alias_schema(schemas):
alias_schema = {}
for db in schemas:
schema = schemas[db].orig
collect = []
for i, (t, c) in enumerate(zip(schema['column_types'], schema['column_names_original'])):
if c[0] == -1:
collect.append('*')
else:
column_with_alias = "{0}@{1}".format(schema['table_names_original'][c[0]].lower(), c[1].lower())
collect.append(column_with_alias)
for t in schema['table_names_original']:
collect.append(t.lower())
collect.append("'value'")
alias_schema[db] = collect
return alias_schema
def predict_with_constrain(model_path, dataset_path):
schemas, eval_foreign_key_maps = load_tables(database_schema_filename)
original_schemas = load_original_schemas(database_schema_filename)
with open(f'{dataset_path}/dev.src', 'r', encoding='utf-8') as f:
item = [i.strip() for i in f.readlines()]
with open(f'{dataset_path}/dev.tgt', 'r', encoding='utf-8') as f:
ground = [i.strip() for i in f.readlines()]
alias_schema = get_alias_schema(schemas)
item_db_cluster = defaultdict(list)
ground_db_cluster = defaultdict(list)
source_db_cluster = defaultdict(list)
    num_example = 1034  # number of examples in the Spider dev split
for db, sentence, g_sql in zip(sql_to_db[:num_example], item[:num_example], ground[:num_example]):
source = sentence.split('<Q>')[-1].strip()
item_db_cluster[db].append(sentence)
ground_db_cluster[db].append(g_sql)
source_db_cluster[db].append(source)
source = []
ground = []
for db, sentence in source_db_cluster.items():
source.extend(sentence)
for db, g_SQL in ground_db_cluster.items():
ground.extend(g_SQL)
model = GENRE.from_pretrained(model_path).eval()
if torch.cuda.is_available():
model.cuda()
    result = []
    for db, sentence in item_db_cluster.items():
        print(f'processing db: {db} with {len(sentence)} sentences')
        rnt = decode_with_constrain(sentence, alias_schema[db], model)
        result.extend([i[0]['text'] if isinstance(i[0]['text'], str) else i[0]['text'][0] for i in rnt])
    eval_file_path = './eval/generate-valid-constrain.txt'
    with open(eval_file_path, "w", encoding="utf8") as f:
        f.write('\n'.join(result))
# result = []
# with open(f'./eval/generate-valid-constrain.txt', "r", encoding="utf8") as f:
# for idx, (sent, db_id) in enumerate(zip(f.readlines(), sql_to_db)):
# result.append(sent.strip())
data = []
for predict_id, (predict_clean, ground_clean, source_clean, db_id) in enumerate(
zip(result, ground, source, sql_to_db)):
predict_clean = post_processing_sql(predict_clean, eval_foreign_key_maps[db_id], original_schemas[db_id],
schemas[db_id])
data.append((str(predict_id), source_clean.split('<Q>')[-1].strip(), ground_clean, predict_clean, db_id))
return data
def decode_with_constrain(sentences, schema, model):
trie = Trie([
model.encode(" {}".format(e))[1:].tolist()
for e in schema
])
prefix_allowed_tokens_fn = get_prefix_allowed_tokens_fn(
model,
sentences,
mention_trie=trie,
)
return model.sample(
sentences,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
)
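# Hedged usage sketch (assumes a trained GENRE checkpoint and the aliases from
# get_alias_schema; the db id and query below are illustrative):
#
#   model = GENRE.from_pretrained('./models/spider_sl').eval()
#   outputs = decode_with_constrain(
#       ['show the names of all singers <Q>'],
#       alias_schema['concert_singer'], model)
#   best = outputs[0][0]['text']  # top constrained hypothesis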
def decode_without_constrain(model_path, dataset_path):
cmd = f'fairseq-generate \
--path {model_path}/model.pt {dataset_path}/bin \
--gen-subset valid \
--nbest 1 \
--max-tokens 4096 \
--source-lang src --target-lang tgt \
--results-path ./eval \
--beam 5 \
--bpe gpt2 \
--remove-bpe \
--skip-invalid-size-inputs-valid-test'
subprocess.Popen(
cmd, universal_newlines=True, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default='./models/spider_sl')
parser.add_argument("--dataset_path", default='./dataset_post/spider_sl')
parser.add_argument("--constrain", action='store_true')
args = parser.parse_args()
predict_and_evaluate(model_path=args.model_path,
dataset_path=args.dataset_path,
constrain=args.constrain)
| ContextualSP/unified_parser_text_to_sql/step3_evaluate.py/0 | {
"file_path": "ContextualSP/unified_parser_text_to_sql/step3_evaluate.py",
"repo_id": "ContextualSP",
"token_count": 5755
} | 269 |
# model settings
input_size = 300
model = dict(
type='RetinaNet',
pretrained='/home2/hongyuan/cydas/spos/mmdetection/390.pth.tar',
backbone=dict(
type='SSDMobilenetV3',
input_size=input_size,
activation_type='relu6',
single_scale=True
),
neck=dict(
type='FPN',
in_channels=[24, 40, 96, 960],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
octave_base_scale=4,
scales_per_octave=3,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[8, 16, 32, 64, 128],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/home2/hongyuan/data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/CyDAS_390'
load_from = None
resume_from = None
workflow = [('train', 1)]
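# Usage note (not part of the original config): with a standard mmdetection
# checkout, a config like this is typically launched via
#   python tools/train.py configs/CyDAS_retinanet_1x.py
# or, for distributed training,
#   ./tools/dist_train.sh configs/CyDAS_retinanet_1x.py <NUM_GPUS>
# The 'pretrained' checkpoint and 'data_root' paths above are machine-specific
# and need to be adapted before running.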
| Cream/CDARTS/CDARTS_detection/configs/CyDAS_retinanet_1x.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/configs/CyDAS_retinanet_1x.py",
"repo_id": "Cream",
"token_count": 1920
} | 270 |
from pathlib import Path
from ..utils import is_list_of, is_str
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
file_handlers = {
'json': JsonHandler(),
'yaml': YamlHandler(),
'yml': YamlHandler(),
'pickle': PickleHandler(),
'pkl': PickleHandler()
}
def load(file, file_format=None, **kwargs):
"""Load data from json/yaml/pickle files.
This method provides a unified api for loading data from serialized files.
Args:
file (str or :obj:`Path` or file-like object): Filename or a file-like
object.
file_format (str, optional): If not specified, the file format will be
inferred from the file extension, otherwise use the specified one.
Currently supported formats include "json", "yaml/yml" and
"pickle/pkl".
Returns:
The content from the file.
"""
if isinstance(file, Path):
file = str(file)
if file_format is None and is_str(file):
file_format = file.split('.')[-1]
if file_format not in file_handlers:
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if is_str(file):
obj = handler.load_from_path(file, **kwargs)
elif hasattr(file, 'read'):
obj = handler.load_from_fileobj(file, **kwargs)
else:
raise TypeError('"file" must be a filepath str or a file-object')
return obj
def dump(obj, file=None, file_format=None, **kwargs):
"""Dump data to json/yaml/pickle strings or files.
This method provides a unified api for dumping data as strings or to files,
and also supports custom arguments for each file format.
Args:
obj (any): The python object to be dumped.
file (str or :obj:`Path` or file-like object, optional): If not
specified, then the object is dump to a str, otherwise to a file
specified by the filename or file-like object.
file_format (str, optional): Same as :func:`load`.
    Returns:
        str or None: The dumped string if ``file`` is None, otherwise ``None``
        (the object is written to the given file or file-like object).
"""
if isinstance(file, Path):
file = str(file)
if file_format is None:
if is_str(file):
file_format = file.split('.')[-1]
elif file is None:
raise ValueError(
'file_format must be specified since file is None')
if file_format not in file_handlers:
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if file is None:
return handler.dump_to_str(obj, **kwargs)
elif is_str(file):
handler.dump_to_path(obj, file, **kwargs)
elif hasattr(file, 'write'):
handler.dump_to_fileobj(obj, file, **kwargs)
else:
raise TypeError('"file" must be a filename str or a file-object')
def _register_handler(handler, file_formats):
"""Register a handler for some file extensions.
Args:
handler (:obj:`BaseFileHandler`): Handler to be registered.
file_formats (str or list[str]): File formats to be handled by this
handler.
"""
if not isinstance(handler, BaseFileHandler):
raise TypeError(
'handler must be a child of BaseFileHandler, not {}'.format(
type(handler)))
if isinstance(file_formats, str):
file_formats = [file_formats]
if not is_list_of(file_formats, str):
raise TypeError('file_formats must be a str or a list of str')
for ext in file_formats:
file_handlers[ext] = handler
def register_handler(file_formats, **kwargs):
def wrap(cls):
_register_handler(cls(**kwargs), file_formats)
return cls
return wrap
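def _demo_register_txt_handler():
    """Minimal sketch of registering a custom handler (hypothetical 'txt'
    format, not part of the original module); it mirrors the pattern of the
    built-in JSON/YAML/pickle handlers above."""

    @register_handler('txt')
    class TxtHandler(BaseFileHandler):

        def load_from_fileobj(self, file, **kwargs):
            return file.read()

        def dump_to_fileobj(self, obj, file, **kwargs):
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    # After registration, the generic api picks the new handler up.
    return dump('hello', file_format='txt')  # -> 'hello'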
| Cream/CDARTS/CDARTS_detection/mmcv/fileio/io.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/fileio/io.py",
"repo_id": "Cream",
"token_count": 1481
} | 271 |
import torch
from torch.nn.parallel._functions import Scatter as OrigScatter
from ._functions import Scatter
from .data_container import DataContainer
def scatter(inputs, target_gpus, dim=0):
"""Scatter inputs to target gpus.
The only difference from original :func:`scatter` is to add support for
:type:`~mmcv.parallel.DataContainer`.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return OrigScatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, DataContainer):
if obj.cpu_only:
return obj.data
else:
return Scatter.forward(target_gpus, obj.data)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
out = list(map(list, zip(*map(scatter_map, obj))))
return out
if isinstance(obj, dict) and len(obj) > 0:
out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return out
        return [obj for _ in target_gpus]  # replicate non-scatterable objects
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
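def _demo_scatter_kwargs():
    """Hedged sketch (assumes at least two visible CUDA devices): a batch of
    four tensors is split into two chunks along dim 0, one per GPU, while the
    plain-Python kwarg is replicated to each device."""
    imgs = torch.randn(4, 3, 32, 32)
    inputs, kwargs = scatter_kwargs((imgs,), {'flag': True}, target_gpus=[0, 1])
    # len(inputs) == 2 and inputs[0][0].shape == (2, 3, 32, 32)
    return inputs, kwargs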
| Cream/CDARTS/CDARTS_detection/mmcv/parallel/scatter_gather.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/parallel/scatter_gather.py",
"repo_id": "Cream",
"token_count": 838
} | 272 |
from torch.nn.utils import clip_grad
from .hook import Hook
class OptimizerHook(Hook):
def __init__(self, grad_clip=None):
self.grad_clip = grad_clip
def clip_grads(self, params):
clip_grad.clip_grad_norm_(
filter(lambda p: p.requires_grad, params), **self.grad_clip)
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
class OptimizerArchHook(Hook):
def __init__(self, grad_clip=None):
self.grad_clip = grad_clip
def clip_grads(self, params):
clip_grad.clip_grad_norm_(
filter(lambda p: p.requires_grad, params), **self.grad_clip)
def arch_after_train_iter(self, runner):
if runner.optimizer_arch is not None:
runner.optimizer_arch.zero_grad()
runner.outputs_arch['loss'].backward()
if runner.optimizer_arch is not None:
runner.optimizer_arch.step() | Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/optimizer.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/optimizer.py",
"repo_id": "Cream",
"token_count": 466
} | 273 |
import numpy as np
from mmcv._ext import flow_warp_c
from mmcv.arraymisc import dequantize, quantize
from mmcv.image import imread, imwrite
from mmcv.utils import is_str
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
"""Read an optical flow map.
Args:
flow_or_path (ndarray or str): A flow map or filepath.
quantize (bool): whether to read quantized pair, if set to True,
remaining args will be passed to :func:`dequantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
Returns:
ndarray: Optical flow represented as a (h, w, 2) numpy array
"""
if isinstance(flow_or_path, np.ndarray):
if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2):
raise ValueError('Invalid flow with shape {}'.format(
flow_or_path.shape))
return flow_or_path
elif not is_str(flow_or_path):
raise TypeError(
'"flow_or_path" must be a filename or numpy array, not {}'.format(
type(flow_or_path)))
if not quantize:
with open(flow_or_path, 'rb') as f:
try:
header = f.read(4).decode('utf-8')
except Exception:
raise IOError('Invalid flow file: {}'.format(flow_or_path))
else:
if header != 'PIEH':
raise IOError(
'Invalid flow file: {}, header does not contain PIEH'.
format(flow_or_path))
w = np.fromfile(f, np.int32, 1).squeeze()
h = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
else:
assert concat_axis in [0, 1]
cat_flow = imread(flow_or_path, flag='unchanged')
if cat_flow.ndim != 2:
raise IOError(
'{} is not a valid quantized flow file, its dimension is {}.'.
format(flow_or_path, cat_flow.ndim))
assert cat_flow.shape[concat_axis] % 2 == 0
dx, dy = np.split(cat_flow, 2, axis=concat_axis)
flow = dequantize_flow(dx, dy, *args, **kwargs)
return flow.astype(np.float32)
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
"""Write optical flow to file.
If the flow is not quantized, it will be saved as a .flo file losslessly,
otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
will be concatenated horizontally into a single image if quantize is True.)
Args:
flow (ndarray): (h, w, 2) array of optical flow.
filename (str): Output filepath.
quantize (bool): Whether to quantize the flow and save it to 2 jpeg
images. If set to True, remaining args will be passed to
:func:`quantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
"""
if not quantize:
with open(filename, 'wb') as f:
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
f.flush()
else:
assert concat_axis in [0, 1]
dx, dy = quantize_flow(flow, *args, **kwargs)
dxdy = np.concatenate((dx, dy), axis=concat_axis)
imwrite(dxdy, filename)
def quantize_flow(flow, max_val=0.02, norm=True):
"""Quantize flow to [0, 255].
After this step, the size of flow will be much smaller, and can be
dumped as jpeg images.
Args:
flow (ndarray): (h, w, 2) array of optical flow.
max_val (float): Maximum value of flow, values beyond
[-max_val, max_val] will be truncated.
norm (bool): Whether to divide flow values by image width/height.
Returns:
tuple[ndarray]: Quantized dx and dy.
"""
h, w, _ = flow.shape
dx = flow[..., 0]
dy = flow[..., 1]
if norm:
dx = dx / w # avoid inplace operations
dy = dy / h
# use 255 levels instead of 256 to make sure 0 is 0 after dequantization.
flow_comps = [
quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]
]
return tuple(flow_comps)
def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
"""Recover from quantized flow.
Args:
dx (ndarray): Quantized dx.
dy (ndarray): Quantized dy.
max_val (float): Maximum value used when quantizing.
denorm (bool): Whether to multiply flow values with width/height.
Returns:
ndarray: Dequantized flow.
"""
assert dx.shape == dy.shape
assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
if denorm:
        dx *= dx.shape[1]
        dy *= dy.shape[0]
flow = np.dstack((dx, dy))
return flow
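def _demo_flow_quantization_roundtrip():
    """Sketch only: quantize a random flow field to two uint8 maps and recover
    an approximation. With norm=True (the default), flow values beyond
    max_val * (w, h) are truncated."""
    flow = np.random.uniform(-4, 4, size=(240, 320, 2)).astype(np.float32)
    dx, dy = quantize_flow(flow)        # two (240, 320) uint8 arrays
    restored = dequantize_flow(dx, dy)  # (240, 320, 2), approximately == flow
    return restored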
def flow_warp(img, flow, filling_value=0, interpolate_mode='nearest'):
"""Use flow to warp img
Args:
img (ndarray, float or uint8): Image to be warped.
flow (ndarray, float): Optical Flow.
filling_value (int): The missing pixels will be set with filling_value.
interpolate_mode (str): bilinear -> Bilinear Interpolation;
nearest -> Nearest Neighbor.
Returns:
ndarray: Warped image with the same shape of img
"""
interpolate_mode_dict = {'bilinear': 0, 'nearest': 1}
assert len(img.shape) == 3
assert len(flow.shape) == 3 and flow.shape[2] == 2
assert flow.shape[:2] == img.shape[:2]
assert interpolate_mode in interpolate_mode_dict.keys()
interpolate_mode = interpolate_mode_dict[interpolate_mode]
img_float = img.astype(np.float64)
out = flow_warp_c(
img_float,
flow.astype(np.float64),
filling_value=filling_value,
interpolate_mode=interpolate_mode)
return out
| Cream/CDARTS/CDARTS_detection/mmcv/video/optflow.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/video/optflow.py",
"repo_id": "Cream",
"token_count": 2719
} | 274 |
from abc import ABCMeta, abstractmethod
class BaseAssigner(metaclass=ABCMeta):
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
pass
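class _ExampleAssigner(BaseAssigner):
    """Illustrative sketch only (not a real mmdet assigner): concrete
    subclasses such as MaxIoUAssigner implement `assign` and return an
    AssignResult; here every box is trivially assigned to background."""

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        return [0] * len(bboxes)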
| Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/base_assigner.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/base_assigner.py",
"repo_id": "Cream",
"token_count": 78
} | 275 |
import mmcv
def wider_face_classes():
return ['face']
def voc_classes():
return [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
def imagenet_det_classes():
return [
'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
'whale', 'wine_bottle', 'zebra'
]
def imagenet_vid_classes():
return [
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
'watercraft', 'whale', 'zebra'
]
def coco_classes():
return [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
]
dataset_aliases = {
'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError('Unrecognized dataset: {}'.format(dataset))
else:
        raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
return labels
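def _demo_get_classes():
    # Sketch only: aliases resolve to the canonical class lists above.
    assert get_classes('pascal_voc') == voc_classes()
    assert len(get_classes('coco')) == 80
    return get_classes('voc')[:3]  # ['aeroplane', 'bicycle', 'bird']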
| Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/class_names.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/class_names.py",
"repo_id": "Cream",
"token_count": 2389
} | 276 |
from collections import OrderedDict
import torch.distributed as dist
from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
_take_tensors)
from mmcv.runner import OptimizerHook, OptimizerArchHook
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
allreduce_grads(runner.model.parameters(), self.coalesce,
self.bucket_size_mb)
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
class DistOptimizerArchHook(OptimizerArchHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def arch_after_train_iter(self, runner):
if runner.optimizer_arch is not None:
runner.optimizer_arch.zero_grad()
#runner.optimizer.zero_grad()
runner.outputs_arch['loss'].backward()
#allreduce_grads(runner.model.parameters(), self.coalesce,
# self.bucket_size_mb)
        # Placeholder: per-module architecture parameter collection is not
        # implemented yet, so the all-reduce below is a no-op on this empty list.
        params = []
if 'backbone' in runner.arch_name:
raise NotImplementedError
if 'neck' in runner.arch_name:
raise NotImplementedError
if 'head' in runner.arch_name:
raise NotImplementedError
allreduce_grads(params, self.coalesce, self.bucket_size_mb)
#if self.grad_clip is not None:
# self.clip_grads(runner.model.parameters())
#runner.optimizer.step()
if runner.optimizer_arch is not None:
runner.optimizer_arch.step()
| Cream/CDARTS/CDARTS_detection/mmdet/core/utils/dist_utils.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/utils/dist_utils.py",
"repo_id": "Cream",
"token_count": 1517
} | 277 |
import inspect
import albumentations
import mmcv
import numpy as np
from albumentations import Compose
from imagecorruptions import corrupt
from numpy import random
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..registry import PIPELINES
@PIPELINES.register_module
class Resize(object):
"""Resize images & bbox & mask.
This transform resizes the input image to some scale. Bboxes and masks are
then resized with the same scale factor. If the input dict contains the key
"scale", then the scale in the input dict is used, otherwise the specified
scale in the init method is used.
`img_scale` can either be a tuple (single-scale) or a list of tuple
(multi-scale). There are 3 multiscale modes:
- `ratio_range` is not None: randomly sample a ratio from the ratio range
and multiply it with the image scale.
- `ratio_range` is None and `multiscale_mode` == "range": randomly sample a
scale from the a range.
- `ratio_range` is None and `multiscale_mode` == "value": randomly sample a
scale from multiple scales.
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given a scale and a range of image ratio
assert len(self.img_scale) == 1
else:
# mode 2: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
@staticmethod
def random_select(img_scales):
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
def _random_scale(self, results):
if self.ratio_range is not None:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_bboxes(self, results):
img_shape = results['img_shape']
for key in results.get('bbox_fields', []):
bboxes = results[key] * results['scale_factor']
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
results[key] = bboxes
def _resize_masks(self, results):
for key in results.get('mask_fields', []):
if results[key] is None:
continue
if self.keep_ratio:
masks = [
mmcv.imrescale(
mask, results['scale_factor'], interpolation='nearest')
for mask in results[key]
]
else:
mask_size = (results['img_shape'][1], results['img_shape'][0])
masks = [
mmcv.imresize(mask, mask_size, interpolation='nearest')
for mask in results[key]
]
results[key] = masks
def __call__(self, results):
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_bboxes(results)
self._resize_masks(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += ('(img_scale={}, multiscale_mode={}, ratio_range={}, '
'keep_ratio={})').format(self.img_scale,
self.multiscale_mode,
self.ratio_range,
self.keep_ratio)
return repr_str
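def _demo_resize():
    """Hedged sketch of calling the transform directly on a minimal `results`
    dict (in practice the dict is produced by the loading pipeline)."""
    transform = Resize(img_scale=(1333, 800), keep_ratio=True)
    results = {
        'img': np.zeros((600, 400, 3), dtype=np.uint8),
        'gt_bboxes': np.array([[10., 10., 100., 100.]], dtype=np.float32),
        'bbox_fields': ['gt_bboxes'],
        'mask_fields': [],
    }
    results = transform(results)
    # keep_ratio=True rescales by a single factor (here 2.0 -> 1200 x 800).
    return results['img_shape'], results['scale_factor']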
@PIPELINES.register_module
class RandomFlip(object):
"""Flip the image & bbox & mask.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
Args:
flip_ratio (float, optional): The flipping probability.
"""
def __init__(self, flip_ratio=None, direction='horizontal'):
self.flip_ratio = flip_ratio
self.direction = direction
if flip_ratio is not None:
assert flip_ratio >= 0 and flip_ratio <= 1
assert direction in ['horizontal', 'vertical']
def bbox_flip(self, bboxes, img_shape, direction):
"""Flip bboxes horizontally.
Args:
bboxes(ndarray): shape (..., 4*k)
img_shape(tuple): (height, width)
"""
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.copy()
if direction == 'horizontal':
w = img_shape[1]
flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
elif direction == 'vertical':
h = img_shape[0]
flipped[..., 1::4] = h - bboxes[..., 3::4] - 1
flipped[..., 3::4] = h - bboxes[..., 1::4] - 1
else:
raise ValueError(
'Invalid flipping direction "{}"'.format(direction))
return flipped
def __call__(self, results):
if 'flip' not in results:
            flip = bool(np.random.rand() < self.flip_ratio)
results['flip'] = flip
if 'flip_direction' not in results:
results['flip_direction'] = self.direction
if results['flip']:
# flip image
results['img'] = mmcv.imflip(
results['img'], direction=results['flip_direction'])
# flip bboxes
for key in results.get('bbox_fields', []):
results[key] = self.bbox_flip(results[key],
results['img_shape'],
results['flip_direction'])
# flip masks
for key in results.get('mask_fields', []):
results[key] = [
mmcv.imflip(mask, direction=results['flip_direction'])
for mask in results[key]
]
return results
def __repr__(self):
return self.__class__.__name__ + '(flip_ratio={})'.format(
self.flip_ratio)
@PIPELINES.register_module
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value, 0 by default.
"""
def __init__(self, size=None, size_divisor=None, pad_val=0):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _pad_img(self, results):
if self.size is not None:
padded_img = mmcv.impad(results['img'], self.size)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(
results['img'], self.size_divisor, pad_val=self.pad_val)
results['img'] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def _pad_masks(self, results):
pad_shape = results['pad_shape'][:2]
for key in results.get('mask_fields', []):
padded_masks = [
mmcv.impad(mask, pad_shape, pad_val=self.pad_val)
for mask in results[key]
]
results[key] = np.stack(padded_masks, axis=0)
def __call__(self, results):
self._pad_img(results)
self._pad_masks(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(size={}, size_divisor={}, pad_val={})'.format(
self.size, self.size_divisor, self.pad_val)
return repr_str
@PIPELINES.register_module
class Normalize(object):
"""Normalize the image.
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def __call__(self, results):
results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(mean={}, std={}, to_rgb={})'.format(
self.mean, self.std, self.to_rgb)
return repr_str
@PIPELINES.register_module
class RandomCrop(object):
"""Random crop the image & bboxes & masks.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, results):
img = results['img']
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, :]
img_shape = img.shape
results['img'] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
results[key] = bboxes
# filter out the gt bboxes that are completely cropped
if 'gt_bboxes' in results:
gt_bboxes = results['gt_bboxes']
valid_inds = (gt_bboxes[:, 2] > gt_bboxes[:, 0]) & (
gt_bboxes[:, 3] > gt_bboxes[:, 1])
# if no gt bbox remains after cropping, just skip this image
if not np.any(valid_inds):
return None
results['gt_bboxes'] = gt_bboxes[valid_inds, :]
if 'gt_labels' in results:
results['gt_labels'] = results['gt_labels'][valid_inds]
# filter and crop the masks
if 'gt_masks' in results:
valid_gt_masks = []
for i in np.where(valid_inds)[0]:
gt_mask = results['gt_masks'][i][crop_y1:crop_y2,
crop_x1:crop_x2]
valid_gt_masks.append(gt_mask)
results['gt_masks'] = valid_gt_masks
return results
def __repr__(self):
return self.__class__.__name__ + '(crop_size={})'.format(
self.crop_size)
@PIPELINES.register_module
class SegResizeFlipPadRescale(object):
"""A sequential transforms to semantic segmentation maps.
The same pipeline as input images is applied to the semantic segmentation
map, and finally rescale it by some scale factor. The transforms include:
1. resize
2. flip
3. pad
4. rescale (so that the final size can be different from the image size)
Args:
scale_factor (float): The scale factor of the final output.
"""
def __init__(self, scale_factor=1):
self.scale_factor = scale_factor
def __call__(self, results):
if results['keep_ratio']:
gt_seg = mmcv.imrescale(
results['gt_semantic_seg'],
results['scale'],
interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results['gt_semantic_seg'],
results['scale'],
interpolation='nearest')
if results['flip']:
gt_seg = mmcv.imflip(gt_seg)
if gt_seg.shape != results['pad_shape']:
gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
if self.scale_factor != 1:
gt_seg = mmcv.imrescale(
gt_seg, self.scale_factor, interpolation='nearest')
results['gt_semantic_seg'] = gt_seg
return results
def __repr__(self):
return self.__class__.__name__ + '(scale_factor={})'.format(
self.scale_factor)
@PIPELINES.register_module
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def __call__(self, results):
img = results['img']
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
results['img'] = img
return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += ('(brightness_delta={}, '
                     'contrast_range=({}, {}), '
                     'saturation_range=({}, {}), '
                     'hue_delta={})').format(self.brightness_delta,
                                             self.contrast_lower,
                                             self.contrast_upper,
                                             self.saturation_lower,
                                             self.saturation_upper,
                                             self.hue_delta)
        return repr_str
@PIPELINES.register_module
class Expand(object):
"""Random expand the image & bboxes.
Randomly place the original image on a canvas of 'ratio' x original image
size filled with mean values. The ratio is in the range of ratio_range.
Args:
mean (tuple): mean value of dataset.
to_rgb (bool): if need to convert the order of mean to align with RGB.
ratio_range (tuple): range of expand ratio.
prob (float): probability of applying this transformation
"""
def __init__(self,
mean=(0, 0, 0),
to_rgb=True,
ratio_range=(1, 4),
seg_ignore_label=None,
prob=0.5):
self.to_rgb = to_rgb
self.ratio_range = ratio_range
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
self.seg_ignore_label = seg_ignore_label
self.prob = prob
def __call__(self, results):
if random.uniform(0, 1) > self.prob:
return results
img, boxes = [results[k] for k in ('img', 'gt_bboxes')]
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
boxes = boxes + np.tile((left, top), 2).astype(boxes.dtype)
results['img'] = expand_img
results['gt_bboxes'] = boxes
if 'gt_masks' in results:
expand_gt_masks = []
for mask in results['gt_masks']:
expand_mask = np.full((int(h * ratio), int(w * ratio)),
0).astype(mask.dtype)
expand_mask[top:top + h, left:left + w] = mask
expand_gt_masks.append(expand_mask)
results['gt_masks'] = expand_gt_masks
# not tested
if 'gt_semantic_seg' in results:
assert self.seg_ignore_label is not None
gt_seg = results['gt_semantic_seg']
expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
self.seg_ignore_label).astype(gt_seg.dtype)
expand_gt_seg[top:top + h, left:left + w] = gt_seg
results['gt_semantic_seg'] = expand_gt_seg
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(mean={}, to_rgb={}, ratio_range={}, ' \
'seg_ignore_label={})'.format(
self.mean, self.to_rgb, self.ratio_range,
self.seg_ignore_label)
return repr_str
@PIPELINES.register_module
class MinIoURandomCrop(object):
"""Random crop the image & bboxes, the cropped patches have minimum IoU
requirement with original image & bboxes, the IoU threshold is randomly
selected from min_ious.
Args:
min_ious (tuple): minimum IoU threshold for all intersections with
bounding boxes
min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
where a >= min_crop_size).
"""
    def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
        # mode 1: return the original image untouched
        self.min_ious = min_ious
        self.sample_mode = (1, *min_ious, 0)
        self.min_crop_size = min_crop_size
def __call__(self, results):
img, boxes, labels = [
results[k] for k in ('img', 'gt_bboxes', 'gt_labels')
]
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
if mode == 1:
return results
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array(
(int(left), int(top), int(left + new_w), int(top + new_h)))
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = ((center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) *
(center[:, 0] < patch[2]) * (center[:, 1] < patch[3]))
if not mask.any():
continue
boxes = boxes[mask]
labels = labels[mask]
# adjust boxes
img = img[patch[1]:patch[3], patch[0]:patch[2]]
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
results['img'] = img
results['gt_bboxes'] = boxes
results['gt_labels'] = labels
if 'gt_masks' in results:
valid_masks = [
results['gt_masks'][i] for i in range(len(mask))
if mask[i]
]
results['gt_masks'] = [
gt_mask[patch[1]:patch[3], patch[0]:patch[2]]
for gt_mask in valid_masks
]
# not tested
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = results['gt_semantic_seg'][
patch[1]:patch[3], patch[0]:patch[2]]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(min_ious={}, min_crop_size={})'.format(
self.min_ious, self.min_crop_size)
return repr_str
@PIPELINES.register_module
class Corrupt(object):
def __init__(self, corruption, severity=1):
self.corruption = corruption
self.severity = severity
def __call__(self, results):
results['img'] = corrupt(
results['img'].astype(np.uint8),
corruption_name=self.corruption,
severity=self.severity)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(corruption={}, severity={})'.format(
self.corruption, self.severity)
return repr_str
@PIPELINES.register_module
class Albu(object):
def __init__(self,
transforms,
bbox_params=None,
keymap=None,
update_pad_shape=False,
skip_img_without_anno=False):
"""
Adds custom transformations from Albumentations lib.
Please, visit `https://albumentations.readthedocs.io`
to get more information.
transforms (list): list of albu transformations
bbox_params (dict): bbox_params for albumentation `Compose`
keymap (dict): contains {'input key':'albumentation-style key'}
skip_img_without_anno (bool): whether to skip the image
if no ann left after aug
"""
self.transforms = transforms
self.filter_lost_elements = False
self.update_pad_shape = update_pad_shape
self.skip_img_without_anno = skip_img_without_anno
# A simple workaround to remove masks without boxes
if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
and 'filter_lost_elements' in bbox_params):
self.filter_lost_elements = True
self.origin_label_fields = bbox_params['label_fields']
bbox_params['label_fields'] = ['idx_mapper']
del bbox_params['filter_lost_elements']
self.bbox_params = (
self.albu_builder(bbox_params) if bbox_params else None)
self.aug = Compose([self.albu_builder(t) for t in self.transforms],
bbox_params=self.bbox_params)
if not keymap:
self.keymap_to_albu = {
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
}
else:
self.keymap_to_albu = keymap
self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
def albu_builder(self, cfg):
"""Import a module from albumentations.
Inherits some of `build_from_cfg` logic.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and "type" in cfg
args = cfg.copy()
obj_type = args.pop("type")
if mmcv.is_str(obj_type):
obj_cls = getattr(albumentations, obj_type)
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError(
'type must be a str or valid type, but got {}'.format(
type(obj_type)))
if 'transforms' in args:
args['transforms'] = [
self.albu_builder(transform)
for transform in args['transforms']
]
return obj_cls(**args)
@staticmethod
def mapper(d, keymap):
"""
Dictionary mapper.
Renames keys according to keymap provided.
Args:
d (dict): old dict
keymap (dict): {'old_key':'new_key'}
Returns:
dict: new dict.
"""
        updated_dict = {}
        for k, v in d.items():
            updated_dict[keymap.get(k, k)] = v
        return updated_dict
def __call__(self, results):
# dict to albumentations format
results = self.mapper(results, self.keymap_to_albu)
if 'bboxes' in results:
# to list of boxes
if isinstance(results['bboxes'], np.ndarray):
results['bboxes'] = [x for x in results['bboxes']]
# add pseudo-field for filtration
if self.filter_lost_elements:
results['idx_mapper'] = np.arange(len(results['bboxes']))
results = self.aug(**results)
if 'bboxes' in results:
if isinstance(results['bboxes'], list):
results['bboxes'] = np.array(
results['bboxes'], dtype=np.float32)
# filter label_fields
if self.filter_lost_elements:
results['idx_mapper'] = np.arange(len(results['bboxes']))
for label in self.origin_label_fields:
results[label] = np.array(
[results[label][i] for i in results['idx_mapper']])
if 'masks' in results:
results['masks'] = [
results['masks'][i] for i in results['idx_mapper']
]
if (not len(results['idx_mapper'])
and self.skip_img_without_anno):
return None
if 'gt_labels' in results:
if isinstance(results['gt_labels'], list):
results['gt_labels'] = np.array(results['gt_labels'])
# back to the original format
results = self.mapper(results, self.keymap_back)
# update final shape
if self.update_pad_shape:
results['pad_shape'] = results['img'].shape
return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(transforms={})'.format(self.transforms)
        return repr_str
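# Sketch of how these transforms are composed in a dataset config (values are
# illustrative; the dict-based style mirrors mmdetection's train_pipeline
# convention):
#
#   train_pipeline = [
#       dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
#       dict(type='RandomFlip', flip_ratio=0.5),
#       dict(type='Normalize', mean=[123.675, 116.28, 103.53],
#            std=[58.395, 57.12, 57.375], to_rgb=True),
#       dict(type='Pad', size_divisor=32),
#   ]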
| Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/transforms.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/transforms.py",
"repo_id": "Cream",
"token_count": 15385
} | 278 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import AnchorGenerator, anchor_target, multi_apply
from .anchor_head import AnchorHead
from ..losses import smooth_l1_loss
from ..registry import HEADS
# TODO: add loss evaluator for SSD
@HEADS.register_module
class SSDHead(AnchorHead):
def __init__(self,
input_size=300,
num_classes=81,
in_channels=(512, 1024, 512, 256, 256, 256),
anchor_strides=(8, 16, 32, 64, 100, 300),
basesize_ratio_range=(0.1, 0.9),
anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)):
super(AnchorHead, self).__init__()
self.input_size = input_size
self.num_classes = num_classes
self.in_channels = in_channels
self.cls_out_channels = num_classes
num_anchors = [len(ratios) * 2 + 2 for ratios in anchor_ratios]
reg_convs = []
cls_convs = []
for i in range(len(in_channels)):
reg_convs.append(
nn.Conv2d(
in_channels[i],
num_anchors[i] * 4,
kernel_size=3,
padding=1))
cls_convs.append(
nn.Conv2d(
in_channels[i],
num_anchors[i] * num_classes,
kernel_size=3,
padding=1))
self.reg_convs = nn.ModuleList(reg_convs)
self.cls_convs = nn.ModuleList(cls_convs)
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (len(in_channels) - 2))
min_sizes = []
max_sizes = []
for r in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(input_size * r / 100))
max_sizes.append(int(input_size * (r + step) / 100))
if input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(input_size * 7 / 100))
max_sizes.insert(0, int(input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(input_size * 10 / 100))
max_sizes.insert(0, int(input_size * 20 / 100))
elif input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(input_size * 4 / 100))
max_sizes.insert(0, int(input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(input_size * 7 / 100))
max_sizes.insert(0, int(input_size * 15 / 100))
self.anchor_generators = []
self.anchor_strides = anchor_strides
for k in range(len(anchor_strides)):
base_size = min_sizes[k]
stride = anchor_strides[k]
ctr = ((stride - 1) / 2., (stride - 1) / 2.)
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
ratios = [1.]
for r in anchor_ratios[k]:
ratios += [1 / r, r] # 4 or 6 ratio
anchor_generator = AnchorGenerator(
base_size, scales, ratios, scale_major=False, ctr=ctr)
indices = list(range(len(ratios)))
indices.insert(1, len(indices))
anchor_generator.base_anchors = torch.index_select(
anchor_generator.base_anchors, 0, torch.LongTensor(indices))
self.anchor_generators.append(anchor_generator)
self.target_means = target_means
self.target_stds = target_stds
self.use_sigmoid_cls = False
self.cls_focal_loss = False
self.fp16_enabled = False
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform', bias=0)
def forward(self, feats):
cls_scores = []
bbox_preds = []
for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
self.cls_convs):
cls_scores.append(cls_conv(feat))
bbox_preds.append(reg_conv(feat))
return cls_scores, bbox_preds
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples, cfg):
loss_cls_all = F.cross_entropy(
cls_score, labels, reduction='none') * label_weights
pos_inds = (labels > 0).nonzero().view(-1)
neg_inds = (labels == 0).nonzero().view(-1)
num_pos_samples = pos_inds.size(0)
num_neg_samples = cfg.neg_pos_ratio * num_pos_samples
if num_neg_samples > neg_inds.size(0):
num_neg_samples = neg_inds.size(0)
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
loss_cls_pos = loss_cls_all[pos_inds].sum()
loss_cls_neg = topk_loss_cls_neg.sum()
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
loss_bbox = smooth_l1_loss(
bbox_pred,
bbox_targets,
bbox_weights,
beta=cfg.smoothl1_beta,
avg_factor=num_total_samples)
return loss_cls[None], loss_bbox
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generators)
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas)
cls_reg_targets = anchor_target(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
self.target_means,
self.target_stds,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=1,
sampling=False,
unmap_outputs=False)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_images = len(img_metas)
all_cls_scores = torch.cat([
s.permute(0, 2, 3, 1).reshape(
num_images, -1, self.cls_out_channels) for s in cls_scores
], 1)
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
all_label_weights = torch.cat(label_weights_list,
-1).view(num_images, -1)
all_bbox_preds = torch.cat([
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
for b in bbox_preds
], -2)
all_bbox_targets = torch.cat(bbox_targets_list,
-2).view(num_images, -1, 4)
all_bbox_weights = torch.cat(bbox_weights_list,
-2).view(num_images, -1, 4)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
all_cls_scores,
all_bbox_preds,
all_labels,
all_label_weights,
all_bbox_targets,
all_bbox_weights,
num_total_samples=num_total_pos,
cfg=cfg)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
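def _demo_ssd_head():
    """Hedged sketch: build the SSD300 head with the COCO base-size ratio range
    and run a forward pass on dummy multi-scale features. The spatial sizes are
    the usual SSD300 feature-map sizes; the values here are assumptions for
    illustration, not taken from a config."""
    head = SSDHead(input_size=300, num_classes=81,
                   basesize_ratio_range=(0.15, 0.9))
    head.init_weights()
    feat_sizes = [38, 19, 10, 5, 3, 1]
    feats = [torch.randn(1, c, s, s)
             for c, s in zip(head.in_channels, feat_sizes)]
    cls_scores, bbox_preds = head(feats)
    return [s.shape for s in cls_scores], [b.shape for b in bbox_preds]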
| Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/ssd_head.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/ssd_head.py",
"repo_id": "Cream",
"token_count": 4283
} | 279 |
import math
import torch.nn as nn
from mmdet.ops import DeformConv, ModulatedDeformConv
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer
class Bottleneck(_Bottleneck):
def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes * (base_width / 64)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.get('fallback_on_stride', False)
self.with_modulated_dcn = self.dcn.get('modulated', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
groups = self.dcn.get('groups', 1)
deformable_groups = self.dcn.get('deformable_groups', 1)
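            # Channel count for the offset branch (illustrative math): a 3x3
            # kernel samples 9 points, each with a (dy, dx) offset, giving
            # 2 * 9 = 18 channels; the modulated (v2) variant adds one mask
            # scalar per point, giving 18 + 9 = 27 channels.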
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
self.conv2_offset = nn.Conv2d(
width,
deformable_groups * offset_channels,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation)
self.conv2 = conv_op(
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
deformable_groups=deformable_groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
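# Worked example of the width formula above (illustrative): a ResNeXt-50
# 32x4d bottleneck with planes=64, groups=32 and base_width=4 uses
#   width = floor(64 * (4 / 64)) * 32 = 4 * 32 = 128
# channels in its grouped 3x3 convolution.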
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
groups=1,
base_width=4,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
gcb=None):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=dilation,
downsample=downsample,
groups=groups,
base_width=base_width,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
gcb=gcb))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation,
groups=groups,
base_width=base_width,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
gcb=gcb))
return nn.Sequential(*layers)
@BACKBONES.register_module
class ResNeXt(ResNet):
"""ResNeXt backbone.
Args:
        depth (int): Depth of ResNeXt, from {50, 101, 152}.
        num_stages (int): Number of ResNeXt stages, normally 4.
        groups (int): Number of groups for the grouped 3x3 convolutions.
        base_width (int): Base width per group.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
super(ResNeXt, self).__init__(**kwargs)
self.groups = groups
self.base_width = base_width
self.inplanes = 64
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
gcb = self.gcb if self.stage_with_gcb[i] else None
planes = 64 * 2**i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
groups=self.groups,
base_width=self.base_width,
style=self.style,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
gcb=gcb)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
| Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/resnext.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/resnext.py",
"repo_id": "Cream",
"token_count": 4190
} | 280 |
from .two_stage import TwoStageDetector
from ..registry import DETECTORS
@DETECTORS.register_module
class FastRCNN(TwoStageDetector):
def __init__(self,
backbone,
bbox_roi_extractor,
bbox_head,
train_cfg,
test_cfg,
neck=None,
shared_head=None,
mask_roi_extractor=None,
mask_head=None,
pretrained=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
shared_head=shared_head,
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
mask_roi_extractor=mask_roi_extractor,
mask_head=mask_head,
pretrained=pretrained)
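    # FastRCNN consumes precomputed proposals rather than an RPN; at test
    # time `proposals` is a list with one entry per augmentation, each
    # holding per-image boxes (typically (N, 4) or (N, 5) tensors; the
    # exact layout here is an assumption for illustration).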
def forward_test(self, imgs, img_metas, proposals, **kwargs):
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError('{} must be a list, but got {}'.format(
name, type(var)))
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(
'num of augmentations ({}) != num of image meta ({})'.format(
len(imgs), len(img_metas)))
# TODO: remove the restriction of imgs_per_gpu == 1 when prepared
imgs_per_gpu = imgs[0].size(0)
assert imgs_per_gpu == 1
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
return self.aug_test(imgs, img_metas, proposals, **kwargs)
| Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/fast_rcnn.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/fast_rcnn.py",
"repo_id": "Cream",
"token_count": 969
} | 281 |
import torch.nn as nn
import torch.nn.functional as F
from mmdet.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from .utils import weight_reduce_loss
from ..registry import LOSSES
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
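    # Note: `pt` below equals (1 - p_t) in the focal loss paper's notation,
    # i.e. the probability mass placed off the target class, so
    # focal_weight = alpha_t * (1 - p_t)**gamma down-weights easy examples.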
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred, target, gamma, alpha)
# TODO: find a proper way to handle the shape of weight
if weight is not None:
weight = weight.view(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0):
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * sigmoid_focal_loss(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
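# Minimal usage sketch of the debug path (illustrative; shapes assumed):
#   pred = torch.randn(8, 80)                      # raw logits
#   target = torch.randint(0, 2, (8, 80)).float()  # binary targets
#   loss = py_sigmoid_focal_loss(pred, target)     # scalar, mean-reduced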
| Cream/CDARTS/CDARTS_detection/mmdet/models/losses/focal_loss.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/focal_loss.py",
"repo_id": "Cream",
"token_count": 1448
} | 282 |
import warnings
import torch.nn as nn
from mmcv.cnn import kaiming_init, constant_init
from .conv_ws import ConvWS2d
from .norm import build_norm_layer
from .quant_conv import QuantConv
conv_cfg = {
'Conv': nn.Conv2d,
'ConvWS': ConvWS2d,
# TODO: octave conv
'QuantConv': QuantConv,
}
def build_conv_layer(cfg, *args, **kwargs):
""" Build convolution layer
Args:
cfg (None or dict): cfg should contain:
type (str): identify conv layer type.
layer args: args needed to instantiate a conv layer.
Returns:
layer (nn.Module): created conv layer
"""
if cfg is None:
cfg_ = dict(type='Conv')
else:
assert isinstance(cfg, dict) and 'type' in cfg
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in conv_cfg:
        raise KeyError('Unrecognized conv type {}'.format(layer_type))
else:
conv_layer = conv_cfg[layer_type]
layer = conv_layer(*args, **kwargs, **cfg_)
return layer
class ConvModule(nn.Module):
"""Conv-Norm-Activation block.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
conv_cfg (dict): Config dict for convolution layer.
norm_cfg (dict): Config dict for normalization layer.
        activation (str or None): Activation type, 'relu' by default.
        inplace (bool): Whether to use inplace mode for activation.
        bottle_first (str): Which op runs first: 'conv' (conv-norm-act),
            'relu' (act-conv-norm) or 'bn' (norm-act-conv). (Do not use this
            flag since the behavior and api may be changed in the future.)
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias='auto',
conv_cfg=None,
norm_cfg=None,
activation='relu',
inplace=True,
bottle_first='conv'):
super(ConvModule, self).__init__()
assert conv_cfg is None or isinstance(conv_cfg, dict)
assert norm_cfg is None or isinstance(norm_cfg, dict)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.activation = activation
self.inplace = inplace
self.bottle_first = bottle_first
self.with_norm = norm_cfg is not None
        self.with_activation = activation is not None
# if the conv layer is before a norm layer, bias is unnecessary.
if bias == 'auto':
bias = False if self.with_norm else True
self.with_bias = bias
if self.with_norm and self.with_bias:
warnings.warn('ConvModule has norm and bias at the same time')
# build convolution layer
self.conv = build_conv_layer(
conv_cfg,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
# export the attributes of self.conv to a higher level for convenience
self.in_channels = self.conv.in_channels
self.out_channels = self.conv.out_channels
self.kernel_size = self.conv.kernel_size
self.stride = self.conv.stride
self.padding = self.conv.padding
self.dilation = self.conv.dilation
self.transposed = self.conv.transposed
self.output_padding = self.conv.output_padding
self.groups = self.conv.groups
# build normalization layers
if self.with_norm:
norm_channels = in_channels if self.bottle_first == 'bn' else out_channels
self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
self.add_module(self.norm_name, norm)
# build activation layer
        if self.with_activation:
if self.activation not in ['relu']:
raise ValueError('{} is currently not supported.'.format(
self.activation))
if self.activation == 'relu':
self.activate = nn.ReLU(inplace=inplace)
# Use msra init by default
self.init_weights()
@property
def norm(self):
return getattr(self, self.norm_name)
def init_weights(self):
nonlinearity = 'relu' if self.activation is None else self.activation
kaiming_init(self.conv, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.norm, 1, bias=0)
def forward(self, x, activate=True, norm=True):
if self.bottle_first == 'conv':
x = self.conv(x)
if norm and self.with_norm:
x = self.norm(x)
            if activate and self.with_activation:
x = self.activate(x)
elif self.bottle_first == 'relu':
            if activate and self.with_activation:
x = self.activate(x)
x = self.conv(x)
if norm and self.with_norm:
x = self.norm(x)
elif self.bottle_first == 'bn':
if norm and self.with_norm:
x = self.norm(x)
            if activate and self.with_activation:
x = self.activate(x)
x = self.conv(x)
else:
raise KeyError('bottle_first is invalid.')
        return x
| Cream/CDARTS/CDARTS_detection/mmdet/models/utils/conv_module.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/utils/conv_module.py",
"repo_id": "Cream",
"token_count": 2780
} | 283 |
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
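  // Bilinear sampling at the fractional location (h, w): blend the four
  // integer neighbors, treating out-of-bounds neighbors as zero.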
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
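    // Scatter the gradient to every integer pixel whose bilinear weight is
    // non-zero (at most the 4 nearest neighbors); the +/-2 window is a
    // conservative bound around the truncated coordinates.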
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
          num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
          kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
| Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu",
"repo_id": "Cream",
"token_count": 19385
} | 284 |
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <torch/extension.h>
template <typename scalar_t>
at::Tensor nms_cpu_kernel(const at::Tensor& dets, const float threshold) {
AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
if (dets.numel() == 0) {
return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
}
auto x1_t = dets.select(1, 0).contiguous();
auto y1_t = dets.select(1, 1).contiguous();
auto x2_t = dets.select(1, 2).contiguous();
auto y2_t = dets.select(1, 3).contiguous();
auto scores = dets.select(1, 4).contiguous();
at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto ndets = dets.size(0);
at::Tensor suppressed_t =
at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU));
auto suppressed = suppressed_t.data<uint8_t>();
auto order = order_t.data<int64_t>();
auto x1 = x1_t.data<scalar_t>();
auto y1 = y1_t.data<scalar_t>();
auto x2 = x2_t.data<scalar_t>();
auto y2 = y2_t.data<scalar_t>();
auto areas = areas_t.data<scalar_t>();
for (int64_t _i = 0; _i < ndets; _i++) {
auto i = order[_i];
if (suppressed[i] == 1) continue;
auto ix1 = x1[i];
auto iy1 = y1[i];
auto ix2 = x2[i];
auto iy2 = y2[i];
auto iarea = areas[i];
for (int64_t _j = _i + 1; _j < ndets; _j++) {
auto j = order[_j];
if (suppressed[j] == 1) continue;
auto xx1 = std::max(ix1, x1[j]);
auto yy1 = std::max(iy1, y1[j]);
auto xx2 = std::min(ix2, x2[j]);
auto yy2 = std::min(iy2, y2[j]);
auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1);
auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1);
auto inter = w * h;
auto ovr = inter / (iarea + areas[j] - inter);
if (ovr >= threshold) suppressed[j] = 1;
}
}
return at::nonzero(suppressed_t == 0).squeeze(1);
}
at::Tensor nms(const at::Tensor& dets, const float threshold) {
at::Tensor result;
AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] {
result = nms_cpu_kernel<scalar_t>(dets, threshold);
});
return result;
}
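// Illustrative call from Python once the extension is built:
//   dets: float tensor of shape (N, 5) laid out as (x1, y1, x2, y2, score)
//   keep = nms(dets, iou_threshold)  // LongTensor of kept indices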
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("nms", &nms, "non-maximum suppression");
}
| Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/nms_cpu.cpp/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/nms_cpu.cpp",
"repo_id": "Cream",
"token_count": 1055
} | 285 |
from .collect_env import collect_env
from .flops_counter import get_model_complexity_info
from .logger import get_root_logger, print_log
from .registry import Registry, build_from_cfg
__all__ = [
'Registry', 'build_from_cfg', 'get_model_complexity_info',
'get_root_logger', 'print_log', 'collect_env'
]
| Cream/CDARTS/CDARTS_detection/mmdet/utils/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/utils/__init__.py",
"repo_id": "Cream",
"token_count": 110
} | 286 |
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # str.rstrip strips a *character set*, not a suffix, so drop the
    # extension with an explicit check instead of rstrip('.pth').
    out_file_name = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = out_file_name + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', out_file, final_file])
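# Illustrative invocation (paths assumed):
#   python publish_model.py work_dirs/faster_rcnn/latest.pth faster_rcnn.pth
# which writes faster_rcnn-<sha8>.pth with the optimizer state stripped.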
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| Cream/CDARTS/CDARTS_detection/tools/publish_model.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_detection/tools/publish_model.py",
"repo_id": "Cream",
"token_count": 361
} | 287 |
import os
import numpy as np
from PIL import Image
from torch.utils import data
from torchvision import transforms
from dataloaders import custom_transforms as tr
import pandas as pd
class CityscapesSegmentation(data.Dataset):
NUM_CLASSES = 7
def __init__(self, args, root, split="train"):
self.root = root
self.split = split
self.args = args
self.files = {}
self.images_base = os.path.join(self.root, 'kd-cityscapes-sources', self.split)
self.annotations_base = os.path.join(self.root, 'kd-cityscapes-gt', self.split)
self.files[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png')
self.map = pd.read_csv('label_map.txt', header=0, sep='\t')
self.map['#id'] = self.map['#id'] + 6
self.dict_map = dict(zip(self.map['#id'],self.map['categoryId']))
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(self.annotations_base,
img_path.split(os.sep)[-2],
os.path.basename(img_path))
_img = Image.open(img_path).convert('RGB')
_tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
_tmp = self.encode_segmap(_tmp)
_target = Image.fromarray(_tmp)
sample = {'image': _img, 'label': _target}
if self.split == 'train':
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample)
elif self.split == 'test':
return self.transform_ts(sample)
def encode_segmap(self, mask):
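        # Shift raw label ids by 6 to line up with the offset applied to
        # the lookup table in __init__, then collapse each shifted id to
        # its category id.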
mask = mask + 6
for label_id, cl in self.dict_map.items():
mask[mask == label_id] = cl
return mask
def recursive_glob(self, rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
        :param rootdir: the root directory
        :param suffix: the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def transform_val(self, sample):
composed_transforms = transforms.Compose([
tr.FixScaleCrop(crop_size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def transform_ts(self, sample):
composed_transforms = transforms.Compose([
tr.FixedResize(size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
if __name__ == '__main__':
from dataloaders.dataloader_utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
    # the dataset root below is a placeholder path for this demo
    cityscapes_train = CityscapesSegmentation(args, root='./cityscapes', split='train')
dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='cityscapes')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/kd.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/kd.py",
"repo_id": "Cream",
"token_count": 2218
} | 288 |
import os
from yacs.config import CfgNode as CN
_C = CN()
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
_C.OUTPUT_DIR = ''
_C.GPUS = (0,)
_C.WORKERS = 4
# Logging frequency
_C.PRINT_FREQ = 20
# Checkpoint frequency
_C.CKPT_FREQ = 5000
# -----------------------------------------------------------------------------
# CUDNN
# -----------------------------------------------------------------------------
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# -----------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
_C.MODEL = CN()
_C.MODEL.META_ARCHITECTURE = 'panoptic_deeplab'
# pretrained model (including decoder, head, etc) on other dataset
# need to do a net surgery to remove classifiers etc.
_C.MODEL.WEIGHTS = ''
_C.MODEL.BN_MOMENTUM = 0.1
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# META could be
# resnet
# mobilenet_v2
# mnasnet
_C.MODEL.BACKBONE.META = 'resnet'
# NAME could be
# For resnet:
# 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
# For mobilenet_v2:
# 'mobilenet_v2'
# For mnasnet:
# 'mnasnet0_5', 'mnasnet0_75' (no official weight), 'mnasnet1_0', 'mnasnet1_3' (no official weight)
_C.MODEL.BACKBONE.NAME = "resnet50"
# Controls output stride
_C.MODEL.BACKBONE.DILATION = (False, False, True)
# pretrained backbone provided by official PyTorch modelzoo
_C.MODEL.BACKBONE.PRETRAINED = True
_C.MODEL.BACKBONE.WEIGHTS = ''
# Low-level feature key
# For resnet backbone:
# res2: 256
# res3: 512
# res4: 1024
# res5: 2048
# For mobilenet_v2 backbone:
# layer_4: 24
# layer_7: 32
# layer_14: 96
# layer_18: 320
# For mnasnet backbone:
# layer_9: 24 (0_5: 16)
# layer_10: 40 (0_5: 24)
# layer_12: 96 (0_5: 48)
# layer_14: 320 (0_5: 160)
# ---------------------------------------------------------------------------- #
# Decoder options
# ---------------------------------------------------------------------------- #
_C.MODEL.DECODER = CN()
_C.MODEL.DECODER.IN_CHANNELS = 2048
_C.MODEL.DECODER.FEATURE_KEY = 'res5'
_C.MODEL.DECODER.DECODER_CHANNELS = 256
_C.MODEL.DECODER.ATROUS_RATES = (6, 12, 18)
# TODO: pass these into the decoder.
_C.MODEL.DECODER.CONV_TYPE = 'depthwise_separable_conv'
_C.MODEL.DECODER.CONV_KERNEL = 5
_C.MODEL.DECODER.CONV_PADDING = 2
_C.MODEL.DECODER.CONV_STACK = 1
# ---------------------------------------------------------------------------- #
# DeepLabV3+ options
# ---------------------------------------------------------------------------- #
_C.MODEL.DEEPLABV3PLUS = CN()
_C.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS = 256
_C.MODEL.DEEPLABV3PLUS.LOW_LEVEL_KEY = 'res2'
_C.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS_PROJECT = 48
# ---------------------------------------------------------------------------- #
# Panoptic-DeepLab options
# ---------------------------------------------------------------------------- #
_C.MODEL.PANOPTIC_DEEPLAB = CN()
_C.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS = (512, 256)
_C.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_KEY = ('res3', 'res2')
_C.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS_PROJECT = (64, 32)
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE = CN()
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ENABLE = False
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.LOW_LEVEL_CHANNELS_PROJECT = (32, 16)
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.DECODER_CHANNELS = 128
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.HEAD_CHANNELS = 128
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ASPP_CHANNELS = 256
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.NUM_CLASSES = (1, 2)
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.CLASS_KEY = ('center', 'offset')
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.FOREGROUND_SEG = False
_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.FOREGROUND_ARCH = 'v1'
# -----------------------------------------------------------------------------
# DATASET
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.ROOT = './datasets/cityscapes'
_C.DATASET.DATASET = 'cityscapes'
_C.DATASET.NUM_CLASSES = 19
_C.DATASET.TRAIN_SPLIT = 'train'
_C.DATASET.TEST_SPLIT = 'val'
_C.DATASET.CROP_SIZE = (513, 1025)
_C.DATASET.MIRROR = True
_C.DATASET.MIN_SCALE = 0.5
_C.DATASET.MAX_SCALE = 2.0
_C.DATASET.SCALE_STEP_SIZE = 0.1
_C.DATASET.MEAN = (0.485, 0.456, 0.406)
_C.DATASET.STD = (0.229, 0.224, 0.225)
_C.DATASET.SEMANTIC_ONLY = False
_C.DATASET.IGNORE_STUFF_IN_OFFSET = True
_C.DATASET.SMALL_INSTANCE_AREA = 0
_C.DATASET.SMALL_INSTANCE_WEIGHT = 1
_C.DATASET.MIN_RESIZE_VALUE = -1
_C.DATASET.MAX_RESIZE_VALUE = -1
_C.DATASET.RESIZE_FACTOR = -1
# -----------------------------------------------------------------------------
# Solver
# -----------------------------------------------------------------------------
_C.SOLVER = CN()
_C.SOLVER.BASE_LR = 0.01
_C.SOLVER.WEIGHT_DECAY = 0.0001
# Weight decay of norm layers.
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
# Bias.
_C.SOLVER.BIAS_LR_FACTOR = 2.0
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.OPTIMIZER = 'sgd'
_C.SOLVER.ADAM_BETAS = (0.9, 0.999)
_C.SOLVER.ADAM_EPS = 1e-08
_C.SOLVER.LR_SCHEDULER_NAME = 'WarmupPolyLR'
# The iteration number to decrease learning rate by GAMMA.
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.POLY_LR_POWER = 0.9
_C.SOLVER.POLY_LR_CONSTANT_ENDING = 0
_C.SOLVER.CLIP_GRADIENTS = CN()
_C.SOLVER.CLIP_GRADIENTS.ENABLED = False
# Type of gradient clipping, currently 2 values are supported:
# - "value": the absolute values of elements of each gradients are clipped
# - "norm": the norm of the gradient for each parameter is clipped thus
# affecting all elements in the parameter
_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
# Maximum absolute value used for clipping gradients
_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
# Floating point number p for L-p norm to be used with the "norm"
# gradient clipping type; for L-inf, please specify .inf
_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
# -----------------------------------------------------------------------------
# Loss
# -----------------------------------------------------------------------------
_C.LOSS = CN()
_C.LOSS.SEMANTIC = CN()
_C.LOSS.SEMANTIC.NAME = 'cross_entropy'
# TODO: make `ignore` more consistent
_C.LOSS.SEMANTIC.IGNORE = 255
_C.LOSS.SEMANTIC.REDUCTION = 'mean'
_C.LOSS.SEMANTIC.THRESHOLD = 0.7
_C.LOSS.SEMANTIC.MIN_KEPT = 100000
_C.LOSS.SEMANTIC.TOP_K_PERCENT = 1.0
_C.LOSS.SEMANTIC.WEIGHT = 1.0
_C.LOSS.CENTER = CN()
_C.LOSS.CENTER.NAME = 'mse'
_C.LOSS.CENTER.REDUCTION = 'none'
_C.LOSS.CENTER.WEIGHT = 200.0
_C.LOSS.OFFSET = CN()
_C.LOSS.OFFSET.NAME = 'l1'
_C.LOSS.OFFSET.REDUCTION = 'none'
_C.LOSS.OFFSET.WEIGHT = 0.01
_C.LOSS.FOREGROUND = CN()
_C.LOSS.FOREGROUND.NAME = 'cross_entropy'
_C.LOSS.FOREGROUND.IGNORE = 255
_C.LOSS.FOREGROUND.REDUCTION = 'mean'
_C.LOSS.FOREGROUND.THRESHOLD = 0.7
_C.LOSS.FOREGROUND.MIN_KEPT = 100000
_C.LOSS.FOREGROUND.TOP_K_PERCENT = 1.0
_C.LOSS.FOREGROUND.WEIGHT = 1.0
# -----------------------------------------------------------------------------
# TRAIN
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.IMS_PER_BATCH = 32
_C.TRAIN.MAX_ITER = 90000
_C.TRAIN.RESUME = False
# -----------------------------------------------------------------------------
# DATALOADER
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
_C.DATALOADER.SAMPLER_TRAIN = 'TrainingSampler'
_C.DATALOADER.TRAIN_SHUFFLE = True
_C.DATALOADER.NUM_WORKERS = 4
# -----------------------------------------------------------------------------
# DEBUG
# -----------------------------------------------------------------------------
_C.DEBUG = CN()
_C.DEBUG.DEBUG = True
_C.DEBUG.DEBUG_FREQ = 100
_C.DEBUG.TARGET_KEYS = ('semantic', 'center', 'offset', 'semantic_weights', 'center_weights', 'offset_weights')
_C.DEBUG.OUTPUT_KEYS = ('semantic', 'center', 'offset')
_C.DEBUG.KEEP_INTERVAL = 1000
# -----------------------------------------------------------------------------
# TEST
# -----------------------------------------------------------------------------
_C.TEST = CN()
_C.TEST.GPUS = (0, )
_C.TEST.CROP_SIZE = (1025, 2049)
_C.TEST.SEMANTIC_FOLDER = 'semantic'
_C.TEST.INSTANCE_FOLDER = 'instance'
_C.TEST.PANOPTIC_FOLDER = 'panoptic'
_C.TEST.FOREGROUND_FOLDER = 'foreground'
_C.TEST.EVAL_INSTANCE = False
_C.TEST.EVAL_PANOPTIC = False
_C.TEST.EVAL_FOREGROUND = False
_C.TEST.MODEL_FILE = ''
_C.TEST.TEST_TIME_AUGMENTATION = False
_C.TEST.FLIP_TEST = False
_C.TEST.SCALE_LIST = [1]
_C.TEST.DEBUG = False
_C.TEST.ORACLE_SEMANTIC = False
_C.TEST.ORACLE_FOREGROUND = False
_C.TEST.ORACLE_CENTER = False
_C.TEST.ORACLE_OFFSET = False
_C.TEST.INSTANCE_SCORE_TYPE = "semantic"
# -----------------------------------------------------------------------------
# POST PROCESSING
# Panoptic post-processing params
# -----------------------------------------------------------------------------
_C.POST_PROCESSING = CN()
_C.POST_PROCESSING.CENTER_THRESHOLD = 0.1
_C.POST_PROCESSING.NMS_KERNEL = 7
_C.POST_PROCESSING.TOP_K_INSTANCE = 200
_C.POST_PROCESSING.STUFF_AREA = 2048
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
cfg.freeze()
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'w') as f:
print(_C, file=f)
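# Hedged usage sketch (not part of the original file). `args` only needs the
# attributes read by update_config(); the flag names below are assumptions:
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--cfg', required=True, help='path to a YAML config')
#   parser.add_argument('opts', nargs=argparse.REMAINDER, default=[])
#   args = parser.parse_args()
#   update_config(_C, args)  # merges the YAML file, then the key-value list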
| Cream/CDARTS/CDARTS_segmentation/segmentation/config/default.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/config/default.py",
"repo_id": "Cream",
"token_count": 3736
} | 289 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py
# Modified by Bowen Cheng (bcheng9@illinois.edu)
# ------------------------------------------------------------------------------
import torch
from torch import nn
from torch.nn import functional as F
__all__ = ["ASPP"]
class ASPPConv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation):
modules = [
nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU()
]
super(ASPPConv, self).__init__(*modules)
class ASPPPooling(nn.Module):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__()
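        # note: no BatchNorm in this branch; global pooling yields 1x1 maps,
        # for which per-batch normalization statistics are ill-defined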
self.aspp_pooling = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.ReLU()
)
def set_image_pooling(self, pool_size=None):
if pool_size is None:
self.aspp_pooling[0] = nn.AdaptiveAvgPool2d(1)
else:
self.aspp_pooling[0] = nn.AvgPool2d(kernel_size=pool_size, stride=1)
def forward(self, x):
size = x.shape[-2:]
x = self.aspp_pooling(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=True)
class ASPP(nn.Module):
def __init__(self, in_channels, out_channels, atrous_rates):
super(ASPP, self).__init__()
# out_channels = 256
modules = []
modules.append(nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU()))
rate1, rate2, rate3 = tuple(atrous_rates)
modules.append(ASPPConv(in_channels, out_channels, rate1))
modules.append(ASPPConv(in_channels, out_channels, rate2))
modules.append(ASPPConv(in_channels, out_channels, rate3))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Dropout(0.5))
def set_image_pooling(self, pool_size):
self.convs[-1].set_image_pooling(pool_size)
def forward(self, x):
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
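# Minimal usage sketch (illustrative, not part of the original file):
#   aspp = ASPP(in_channels=2048, out_channels=256, atrous_rates=(6, 12, 18))
#   y = aspp(torch.randn(2, 2048, 33, 33))  # -> (2, 256, 33, 33)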
| Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/aspp.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/aspp.py",
"repo_id": "Cream",
"token_count": 1233
} | 290 |
from .build import build_optimizer, build_lr_scheduler
from .lr_scheduler import WarmupMultiStepLR, WarmupCosineLR, WarmupPolyLR
from .utils import get_lr_group_id
| Cream/CDARTS/CDARTS_segmentation/segmentation/solver/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/solver/__init__.py",
"repo_id": "Cream",
"token_count": 54
} | 291 |
from .bdd import BDD
__all__ = ['BDD']
| Cream/CDARTS/CDARTS_segmentation/tools/datasets/bdd/__init__.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/datasets/bdd/__init__.py",
"repo_id": "Cream",
"token_count": 18
} | 292 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from engine.logger import get_logger
logger = get_logger()
L1Loss = nn.L1Loss
MSELoss = nn.MSELoss
CrossEntropyLoss = nn.CrossEntropyLoss
class SigmoidFocalLoss(nn.Module):
def __init__(self, ignore_label, gamma=2.0, alpha=0.25,
reduction='mean'):
super(SigmoidFocalLoss, self).__init__()
self.ignore_label = ignore_label
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
def forward(self, pred, target):
b, h, w = target.size()
pred = pred.view(b, -1, 1)
pred_sigmoid = pred.sigmoid()
target = target.view(b, -1).float()
mask = (target.ne(self.ignore_label)).float()
target = mask * target
onehot = target.view(b, -1, 1)
max_val = (-pred_sigmoid).clamp(min=0)
pos_part = (1 - pred_sigmoid) ** self.gamma * (
pred_sigmoid - pred_sigmoid * onehot)
neg_part = pred_sigmoid ** self.gamma * (max_val + (
(-max_val).exp() + (-pred_sigmoid - max_val).exp()).log())
loss = -(self.alpha * pos_part + (1 - self.alpha) * neg_part).sum(
dim=-1) * mask
if self.reduction == 'mean':
loss = loss.mean()
return loss
class ProbOhemCrossEntropy2d(nn.Module):
def __init__(self, ignore_label, reduction='mean', thresh=0.6, min_kept=256,
down_ratio=1, use_weight=False):
super(ProbOhemCrossEntropy2d, self).__init__()
self.ignore_label = ignore_label
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.down_ratio = down_ratio
if use_weight:
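            # hand-tuned frequency-balancing weights for the 19 Cityscapes classes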
weight = torch.FloatTensor(
[0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,
0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
1.0865, 1.1529, 1.0507]).cuda()
self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
weight=weight,
ignore_index=ignore_label)
else:
self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
ignore_index=ignore_label)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_label)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
logger.info('Labels: {}'.format(num_valid))
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[
target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
index = mask_prob.argsort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
# logger.info('Valid Mask: {}'.format(valid_mask.sum()))
target = target.masked_fill_(~valid_mask, self.ignore_label)
target = target.view(b, h, w)
return self.criterion(pred, target)
class RegularCE(nn.Module):
"""
    Regular cross entropy loss for semantic segmentation; supports a pixel-wise loss weight.
Arguments:
ignore_label: Integer, label to ignore.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, weight=None):
super(RegularCE, self).__init__()
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight,
ignore_index=ignore_label,
reduction='none')
def forward(self, logits, labels, **kwargs):
if 'semantic_weights' in kwargs:
pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
pixel_losses = pixel_losses.contiguous().view(-1)
else:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
mask = labels.contiguous().view(-1) != self.ignore_label
pixel_losses = pixel_losses[mask]
return pixel_losses.mean()
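# Illustrative usage of these criteria (not part of the original file):
#   criterion = RegularCE(ignore_label=255)
#   loss = criterion(logits, labels)  # logits: (B, C, H, W), labels: (B, H, W)
#   loss = criterion(logits, labels, semantic_weights=w)  # optional per-pixel weights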
class OhemCE(nn.Module):
"""
Online hard example mining with cross entropy loss, for semantic segmentation.
This is widely used in PyTorch semantic segmentation frameworks.
Reference: https://github.com/HRNet/HRNet-Semantic-Segmentation/blob/1b3ae72f6025bde4ea404305d502abea3c2f5266/lib/core/criterion.py#L29
Arguments:
ignore_label: Integer, label to ignore.
threshold: Float, threshold for softmax score (of gt class), only predictions with softmax score
below this threshold will be kept.
min_kept: Integer, minimum number of pixels to be kept, it is used to adjust the
threshold value to avoid number of examples being too small.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, threshold=0.7,
min_kept=100000, weight=None):
super(OhemCE, self).__init__()
self.threshold = threshold
self.min_kept = max(1, min_kept)
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight,
ignore_index=ignore_label,
reduction='none')
def forward(self, logits, labels, **kwargs):
predictions = F.softmax(logits, dim=1)
if 'semantic_weights' in kwargs:
pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
pixel_losses = pixel_losses.contiguous().view(-1)
else:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
mask = labels.contiguous().view(-1) != self.ignore_label
tmp_labels = labels.clone()
tmp_labels[tmp_labels == self.ignore_label] = 0
# Get the score for gt class at each pixel location.
predictions = predictions.gather(1, tmp_labels.unsqueeze(1))
predictions, indices = predictions.contiguous().view(-1, )[mask].contiguous().sort()
min_value = predictions[min(self.min_kept, predictions.numel() - 1)]
threshold = max(min_value, self.threshold)
pixel_losses = pixel_losses[mask][indices]
pixel_losses = pixel_losses[predictions < threshold]
return pixel_losses.mean()
class DeepLabCE(nn.Module):
"""
    Hard pixel mining with cross entropy loss, for semantic segmentation.
This is used in TensorFlow DeepLab frameworks.
Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33
Arguments:
ignore_label: Integer, label to ignore.
top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its value < 1.0, only compute the loss for
the top k percent pixels (e.g., the top 20% pixels). This is useful for hard pixel mining.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None):
super(DeepLabCE, self).__init__()
self.top_k_percent_pixels = top_k_percent_pixels
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight,
ignore_index=ignore_label,
reduction='none')
def forward(self, logits, labels, **kwargs):
if 'semantic_weights' in kwargs:
pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
pixel_losses = pixel_losses.contiguous().view(-1)
else:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
if self.top_k_percent_pixels == 1.0:
return pixel_losses.mean()
top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel())
pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels)
        return pixel_losses.mean()
| Cream/CDARTS/CDARTS_segmentation/tools/seg_opr/loss_opr.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/seg_opr/loss_opr.py",
"repo_id": "Cream",
"token_count": 4080
} | 293 |
# ------------------------------------------------------------------------------
# Adds `segmentation` package into Python path.
# Written by Bowen Cheng (bcheng9@illinois.edu)
# ------------------------------------------------------------------------------
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
lib_path = osp.join(this_dir, '..')
add_path(lib_path)
add_path(this_dir)
add_path(osp.join(lib_path, 'tools'))
| Cream/CDARTS/CDARTS_segmentation/train/_init_paths.py/0 | {
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/_init_paths.py",
"repo_id": "Cream",
"token_count": 157
} | 294 |
## NAS-Bench-201
* Main python file is
```buildoutcfg
${ROOT}/benchmark201/search.py
```
* Here we present our search script on NAS-Bench-201.
```buildoutcfg
cd benchmark201
bash run_search_cifar_1gpu.sh
```
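* The shell script wraps a single-GPU distributed launch. If you want to call the entry point directly, the invocation is roughly of the following form (a sketch only — the exact arguments are defined in `configs/config.py` and the shell script, so check those first):
```buildoutcfg
python -m torch.distributed.launch --nproc_per_node=1 search.py
```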
| Cream/CDARTS/benchmark201/README.md/0 | {
"file_path": "Cream/CDARTS/benchmark201/README.md",
"repo_id": "Cream",
"token_count": 99
} | 295 |
""" Search cell """
import os
import copy
import json
import torch
import time
import math
import torch.nn as nn
import numpy as np
import torch.distributed as dist
from tensorboardX import SummaryWriter
from models.cdarts_controller import CDARTSController
from utils.visualize import plot
from utils import utils
from datasets.data_utils import SubsetDistributedSampler
from core.search_function import search, retrain_warmup, validate
from nas_201_api import NASBench201API as API
from configs.config import SearchConfig
config = SearchConfig()
if 'cifar' in config.dataset:
from datasets.cifar import get_search_datasets
elif 'imagenet' in config.dataset:
from datasets.imagenet import get_search_datasets
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
if config.local_rank == 0:
config.print_params(logger.info)
os.makedirs(config.plot_path, exist_ok=True)
if config.use_apex:
import apex
from apex.parallel import DistributedDataParallel as DDP
else:
DDP = torch.nn.parallel.DistributedDataParallel
def main():
logger.info("Logger is set - training start")
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
    # TODO: make this benchmark path configurable; it must point to a local
    # copy of the NAS-Bench-201 benchmark file.
    # api = None
    api = API('/home/hongyuan/benchmark/NAS-Bench-201-v1_0-e61699.pth')
if config.distributed:
config.gpu = config.local_rank % torch.cuda.device_count()
torch.cuda.set_device(config.gpu)
# distributed init
torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
world_size=config.world_size, rank=config.local_rank)
config.world_size = torch.distributed.get_world_size()
config.total_batch_size = config.world_size * config.batch_size
else:
config.total_batch_size = config.batch_size
loaders, samplers = get_search_datasets(config)
train_loader, valid_loader = loaders
train_sampler, valid_sampler = samplers
net_crit = nn.CrossEntropyLoss().cuda()
controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier)
resume_state = None
if config.resume:
resume_state = torch.load(config.resume_path, map_location='cpu')
if config.resume:
controller.load_state_dict(resume_state['controller'])
controller = controller.cuda()
if config.sync_bn:
if config.use_apex:
controller = apex.parallel.convert_syncbn_model(controller)
else:
controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller)
if config.use_apex:
controller = DDP(controller, delay_allreduce=True)
else:
controller = DDP(controller, device_ids=[config.gpu])
# warm up model_search
if config.ensemble_param:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers.parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.ensemble_param}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
else:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers.parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
# search training loop
sta_search_iter = 0
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min)
    # placeholders; the real sub-model optimizer/schedulers are rebuilt once
    # the warmup phase ends (see the else-branch below)
    lr_scheduler_retrain = nn.ModuleList()
    alpha_optim = nn.ModuleList()
    optimizer = nn.ModuleList()
sub_epoch = 0
for search_iter in range(sta_search_iter, config.search_iter):
if search_iter < config.pretrain_epochs:
if config.local_rank == 0:
logger.info("####### Super model warmup #######")
train_sampler.set_epoch(search_iter)
retrain_warmup(train_loader, controller, w_optim, search_iter, writer, logger, True, config.pretrain_epochs, config)
#lr_scheduler.step()
else:
# build new controller
genotype = controller.module.genotype()
controller.module.build_nas_model(genotype)
controller_b = copy.deepcopy(controller.module)
del controller
controller = controller_b.cuda()
# sync params from super layer pool
controller.copy_params_from_super_layer()
if config.sync_bn:
if config.use_apex:
controller = apex.parallel.convert_syncbn_model(controller)
else:
controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller)
if config.use_apex:
controller = DDP(controller, delay_allreduce=True)
else:
controller = DDP(controller, device_ids=[config.gpu])
# weights optimizer
if config.ensemble_param:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers.parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.ensemble_param}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
else:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers.parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
# arch_params optimizer
alpha_optim = torch.optim.Adam(controller.module.arch_parameters(), config.alpha_lr, betas=(0.5, 0.999),
weight_decay=config.alpha_weight_decay)
if config.ensemble_param:
optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.nas_layers.parameters()},
{"params": controller.module.ensemble_param},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.fc_nas.parameters()}],
lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
else:
optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.nas_layers.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.fc_nas.parameters()}],
lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
lr_scheduler_retrain = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, config.search_iter_epochs, eta_min=config.w_lr_min)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min)
# warmup model main
if config.local_rank == 0:
logger.info("####### Sub model warmup #######")
for warmup_epoch in range(config.nasnet_warmup):
valid_sampler.set_epoch(warmup_epoch)
retrain_warmup(valid_loader, controller, optimizer, warmup_epoch, writer, logger, False, config.nasnet_warmup, config)
lr_search = lr_scheduler.get_lr()[0]
lr_main = lr_scheduler_retrain.get_lr()[0]
search_epoch = search_iter
# reset iterators
train_sampler.set_epoch(search_epoch)
valid_sampler.set_epoch(search_epoch)
# training
search(train_loader, valid_loader, controller, optimizer, w_optim, alpha_optim, search_epoch, writer, logger, config)
# sync params to super layer pool
controller.module.copy_params_from_nas_layer()
# nasbench201
if config.local_rank == 0:
logger.info('{}'.format(controller.module._arch_parameters))
result = api.query_by_arch(controller.module.genotype())
logger.info('{:}'.format(result))
cifar10_train, cifar10_test, cifar100_train, cifar100_valid, \
cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test = utils.distill(result)
writer.add_scalars('nasbench201/cifar10', {'train':cifar10_train,'test':cifar10_test}, search_epoch)
writer.add_scalars('nasbench201/cifar100', {'train':cifar100_train,'valid':cifar100_valid, 'test':cifar100_test}, search_epoch)
writer.add_scalars('nasbench201/imagenet16', {'train':imagenet16_train,'valid':imagenet16_valid, 'test':imagenet16_test}, search_epoch)
#lr_scheduler.step()
#lr_scheduler_retrain.step()
torch.cuda.empty_cache()
if __name__ == "__main__":
sta_time = time.time()
main()
search_time = time.time() - sta_time
search_hour = math.floor(search_time / 3600)
search_min = math.floor(search_time / 60 - search_hour * 60)
if config.local_rank==0:
logger.info("Search time: hour: {} minute: {}".format(search_hour, search_min))
| Cream/CDARTS/benchmark201/search.py/0 | {
"file_path": "Cream/CDARTS/benchmark201/search.py",
"repo_id": "Cream",
"token_count": 5741
} | 296 |
AUTO_RESUME: True
DATA_DIR: './data/imagenet'
MODEL: 'Childnet_Testing'
RESUME_PATH: './experiments/workspace/ckps/42.pth.tar'
SAVE_PATH: './experiments/workspace/test'
SEED: 42
LOG_INTERVAL: 50
RECOVERY_INTERVAL: 0
WORKERS: 4
NUM_GPU: 2
SAVE_IMAGES: False
AMP: False
OUTPUT: 'None'
EVAL_METRICS: 'prec1'
TTA: 0
LOCAL_RANK: 0
DATASET:
NUM_CLASSES: 1000
IMAGE_SIZE: 224 # image patch size
INTERPOLATION: 'bilinear' # Image resize interpolation type
BATCH_SIZE: 32 # batch size
NO_PREFECHTER: False
NET:
GP: 'avg'
DROPOUT_RATE: 0.0
SELECTION: 42
EMA:
USE: True
FORCE_CPU: False # force model ema to be tracked on CPU
DECAY: 0.9998
OPTIMIZER:
MOMENTUM: 0.9
  WEIGHT_DECAY: 1e-3
| Cream/Cream/experiments/configs/test/test.yaml/0 | {
"file_path": "Cream/Cream/experiments/configs/test/test.yaml",
"repo_id": "Cream",
"token_count": 317
} | 297 |
from lib.utils.builder_util import *
from lib.models.builders.build_childnet import *
from timm.models.layers import SelectAdaptivePool2d
from timm.models.layers.activations import hard_sigmoid
# ChildNet Structures
class ChildNet(nn.Module):
def __init__(
self,
block_args,
num_classes=1000,
in_chans=3,
stem_size=16,
num_features=1280,
head_bias=True,
channel_multiplier=1.0,
pad_type='',
act_layer=nn.ReLU,
drop_rate=0.,
drop_path_rate=0.,
se_kwargs=None,
norm_layer=nn.BatchNorm2d,
norm_kwargs=None,
global_pool='avg',
logger=None,
verbose=False):
super(ChildNet, self).__init__()
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self._in_chs = in_chans
self.logger = logger
# Stem
stem_size = round_channels(stem_size, channel_multiplier)
self.conv_stem = create_conv2d(
self._in_chs, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size, **norm_kwargs)
self.act1 = act_layer(inplace=True)
self._in_chs = stem_size
# Middle stages (IR/ER/DS Blocks)
builder = ChildNetBuilder(
channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs,
norm_layer, norm_kwargs, drop_path_rate, verbose=verbose)
self.blocks = nn.Sequential(*builder(self._in_chs, block_args))
# self.blocks = builder(self._in_chs, block_args)
self._in_chs = builder.in_chs
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = create_conv2d(
self._in_chs,
self.num_features,
1,
padding=pad_type,
bias=head_bias)
self.act2 = act_layer(inplace=True)
# Classifier
self.classifier = nn.Linear(
self.num_features *
self.global_pool.feat_mult(),
self.num_classes)
efficientnet_init_weights(self)
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.num_classes = num_classes
self.classifier = nn.Linear(
self.num_features * self.global_pool.feat_mult(),
num_classes) if self.num_classes else None
def forward_features(self, x):
# architecture = [[0], [], [], [], [], [0]]
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
x = self.blocks(x)
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = x.flatten(1)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.classifier(x)
return x
def gen_childnet(arch_list, arch_def, **kwargs):
# arch_list = [[0], [], [], [], [], [0]]
choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]}
choices_list = [[x, y] for x in choices['kernel_size']
for y in choices['exp_ratio']]
num_features = 1280
# act_layer = HardSwish
act_layer = Swish
new_arch = []
# change to child arch_def
for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)):
if len(layer_arch) == 1:
new_arch.append(layer_arch)
continue
else:
new_layer = []
for j, (block_choice, block_arch) in enumerate(
zip(layer_choice, layer_arch)):
kernel_size, exp_ratio = choices_list[block_choice]
elements = block_arch.split('_')
block_arch = block_arch.replace(
elements[2], 'k{}'.format(str(kernel_size)))
block_arch = block_arch.replace(
elements[4], 'e{}'.format(str(exp_ratio)))
new_layer.append(block_arch)
new_arch.append(new_layer)
model_kwargs = dict(
block_args=decode_arch_def(new_arch),
num_features=num_features,
stem_size=16,
norm_kwargs=resolve_bn_args(kwargs),
act_layer=act_layer,
se_kwargs=dict(
act_layer=nn.ReLU,
gate_fn=hard_sigmoid,
reduce_mid=True,
divisor=8),
**kwargs,
)
model = ChildNet(**model_kwargs)
return model
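# Illustrative call (not part of the original file). `arch_list` selects one
# (kernel_size, exp_ratio) choice per searchable block; `arch_def` is the
# string-encoded supernet layout used elsewhere in this repo:
#   model = gen_childnet(arch_list, arch_def, num_classes=1000, drop_rate=0.0)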
| Cream/Cream/lib/models/structures/childnet.py/0 | {
"file_path": "Cream/Cream/lib/models/structures/childnet.py",
"repo_id": "Cream",
"token_count": 2414
} | 298 |
# EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention
:pushpin: This is an official PyTorch implementation of **[CVPR 2023]** - EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention
> [**EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention**]()<br>
> [Xinyu Liu](https://xinyuliu-jeffrey.github.io/), [Houwen Peng](https://houwenpeng.com/), [Ningxin Zheng](https://www.microsoft.com/en-us/research/people/nizhen/), [Yuqing Yang](https://www.microsoft.com/en-us/research/people/yuqyang/), [Han Hu](https://ancientmooner.github.io/), [Yixuan Yuan](http://www.ee.cuhk.edu.hk/~yxyuan/)<br>The Chinese University of Hong Kong, Microsoft Research Asia
**EfficientViT** is a family of high-speed vision transformers. It is built with a new memory efficient building block with a **sandwich layout**, and an efficient **cascaded group attention** operation which mitigates attention computation redundancy.
<div align="center">
<img width="80%" alt="EfficientViT overview" src="classification/.figures/efficientvit_main.gif"/>
</div>
## News
**[2023.5.11]** :newspaper: Code and pre-trained models of EfficientViT are released.
## Highlights
<div align="center">
<img width="80%" alt="EfficientViT speed" src="classification/.figures/modelACC_gpu.png"/><br>
Models are trained on ImageNet-1K and measured with V100 GPU.
</div>
<br>
:star: EfficientViT family shows better speed and accuracy.
* EfficientViT uses **sandwich layout** block to reduce memory time consumption and **cascaded group attention** to mitigate attention computation redundancy.
* EfficientViT-M0 with **63.2%** Top-1 accuracy achieves **27,644 images/s** on V100 GPU, **228.4 images/s** on Intel CPU, and **340.1 images/s** as onnx models.
* EfficientViT-M4 achieves **74.3%** Top-1 accuracy on ImageNet-1k, with **15,914 imgs/s** inference throughput at 224x224 resolution, measured on the V100 GPU.
* EfficientViT-M5 trained for 300 epochs (~**30h** on 8 V100 GPUs) achieves **77.1%** Top-1 accuracy and **93.4%** Top-5 accuracy with a throughput of **10,621 images/s** on V100 GPU.
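For intuition, a minimal PyTorch sketch of the cascaded group attention idea is shown below. This is an illustration written for this README, not the released implementation — it omits the depthwise convolution on queries, attention biases, and other details of the official code:

```python
import torch
import torch.nn as nn

class CascadedGroupAttentionSketch(nn.Module):
    """Each head attends over its own channel split of the input, and the
    output of head i is added to the split feeding head i+1 (the cascade)."""
    def __init__(self, dim, num_heads):
        super().__init__()
        assert dim % num_heads == 0
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        # one small projection per head, acting on a channel split only
        self.qkvs = nn.ModuleList(
            [nn.Linear(head_dim, 3 * head_dim) for _ in range(num_heads)])
        self.proj = nn.Linear(dim, dim)

    def forward(self, x):  # x: (B, N, dim)
        splits = x.chunk(self.num_heads, dim=-1)
        feat = splits[0]
        outs = []
        for i in range(self.num_heads):
            if i > 0:  # cascade: add the previous head's output to the next split
                feat = feat + splits[i]
            q, k, v = self.qkvs[i](feat).chunk(3, dim=-1)
            attn = (q @ k.transpose(-2, -1)) * self.scale
            feat = attn.softmax(dim=-1) @ v
            outs.append(feat)
        return self.proj(torch.cat(outs, dim=-1))
```

Compared with standard multi-head attention, each head's projections here see only `dim / num_heads` channels, which is where the parameter and compute savings come from.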
## Get Started
:beginner: We provide a simple way to use the pre-trained EfficientViT models directly:
```python
from classification.model.build import EfficientViT_M4
model = EfficientViT_M4(pretrained='efficientvit_m4')
out = model(image)
```
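For deployment-style benchmarks, the repo folds BatchNorm layers before tracing via `utils.replace_batchnorm(model)` (see `classification/speed_test.py`).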
:hammer: Here we provide setup, evaluation, and training scripts for different tasks.
### Image Classification
Please refer to [Classification](./classification/README.md).
### Object Detection and Instance Segmentation
Please refer to [Downstream](./downstream/README.md).
## Citation
If you find our project helpful, please feel free to leave a star and cite our paper:
```BibTeX
@InProceedings{liu2023efficientvit,
title = {EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention},
author = {Liu, Xinyu and Peng, Houwen and Zheng, Ningxin and Yang, Yuqing and Hu, Han and Yuan, Yixuan},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2023},
}
```
## Acknowledgement
We sincerely appreciate [Swin Transformer](https://github.com/microsoft/swin-transformer), [LeViT](https://github.com/facebookresearch/LeViT), [pytorch-image-models](https://github.com/rwightman/pytorch-image-models), and [PyTorch](https://github.com/pytorch/pytorch) for their awesome codebases.
## License
- [License](./LICENSE)
| Cream/EfficientViT/README.md/0 | {
"file_path": "Cream/EfficientViT/README.md",
"repo_id": "Cream",
"token_count": 1125
} | 299 |
"""
Testing the speed of different models
"""
import os
import torch
import torchvision
import time
import timm
from model.build import EfficientViT_M0, EfficientViT_M1, EfficientViT_M2, EfficientViT_M3, EfficientViT_M4, EfficientViT_M5
import utils
torch.autograd.set_grad_enabled(False)
T0 = 10  # seconds of warmup before timing starts
T1 = 60  # minimum total seconds of timed runs
def compute_throughput_cpu(name, model, device, batch_size, resolution=224):
inputs = torch.randn(batch_size, 3, resolution, resolution, device=device)
# warmup
start = time.time()
while time.time() - start < T0:
model(inputs)
timing = []
while sum(timing) < T1:
start = time.time()
model(inputs)
timing.append(time.time() - start)
timing = torch.as_tensor(timing, dtype=torch.float32)
print(name, device, batch_size / timing.mean().item(),
'images/s @ batch size', batch_size)
def compute_throughput_cuda(name, model, device, batch_size, resolution=224):
inputs = torch.randn(batch_size, 3, resolution, resolution, device=device)
torch.cuda.empty_cache()
torch.cuda.synchronize()
start = time.time()
with torch.cuda.amp.autocast():
while time.time() - start < T0:
model(inputs)
timing = []
if device == 'cuda:0':
torch.cuda.synchronize()
with torch.cuda.amp.autocast():
while sum(timing) < T1:
start = time.time()
model(inputs)
torch.cuda.synchronize()
timing.append(time.time() - start)
timing = torch.as_tensor(timing, dtype=torch.float32)
print(name, device, batch_size / timing.mean().item(),
'images/s @ batch size', batch_size)
for device in ['cuda:0', 'cpu']:
if 'cuda' in device and not torch.cuda.is_available():
print("no cuda")
continue
if device == 'cpu':
os.system('echo -n "nb processors "; '
'cat /proc/cpuinfo | grep ^processor | wc -l; '
'cat /proc/cpuinfo | grep ^"model name" | tail -1')
print('Using 1 cpu thread')
torch.set_num_threads(1)
compute_throughput = compute_throughput_cpu
else:
print(torch.cuda.get_device_name(torch.cuda.current_device()))
compute_throughput = compute_throughput_cuda
for n, batch_size0, resolution in [
('EfficientViT_M0', 2048, 224),
('EfficientViT_M1', 2048, 224),
('EfficientViT_M2', 2048, 224),
('EfficientViT_M3', 2048, 224),
('EfficientViT_M4', 2048, 224),
('EfficientViT_M5', 2048, 224),
]:
if device == 'cpu':
batch_size = 16
else:
batch_size = batch_size0
torch.cuda.empty_cache()
inputs = torch.randn(batch_size, 3, resolution,
resolution, device=device)
model = eval(n)(num_classes=1000)
utils.replace_batchnorm(model)
model.to(device)
model.eval()
model = torch.jit.trace(model, inputs)
compute_throughput(n, model, device,
batch_size, resolution=resolution)
| Cream/EfficientViT/classification/speed_test.py/0 | {
"file_path": "Cream/EfficientViT/classification/speed_test.py",
"repo_id": "Cream",
"token_count": 1400
} | 300 |
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
import sys
# importing these modules registers the EfficientViT backbone/FPN with mmdet
import efficientvit
import efficientvit_fpn
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
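# Illustrative invocation (the config/checkpoint paths are placeholders, not
# files shipped with this repo):
#   python test.py <config.py> <checkpoint.pth> --eval bbox segm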
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
    # only rank 0 creates the work dir
if args.work_dir is not None and rank == 0:
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
metric = dataset.evaluate(outputs, **eval_kwargs)
print(metric)
metric_dict = dict(config=args.config, metric=metric)
if args.work_dir is not None and rank == 0:
mmcv.dump(metric_dict, json_file)
if __name__ == '__main__':
main()
| Cream/EfficientViT/downstream/test.py/0 | {
"file_path": "Cream/EfficientViT/downstream/test.py",
"repo_id": "Cream",
"token_count": 4144
} | 301 |
MODEL:
TYPE: swin
NAME: swin_small_patch4_window7_224
DROP_PATH_RATE: 0.3
SWIN:
EMBED_DIM: 96
DEPTHS: [ 2, 2, 18, 2 ]
NUM_HEADS: [ 3, 6, 12, 24 ]
  WINDOW_SIZE: 7
| Cream/MiniViT/Mini-Swin/configs/swin_small_patch4_window7_224.yaml/0 | {
"file_path": "Cream/MiniViT/Mini-Swin/configs/swin_small_patch4_window7_224.yaml",
"repo_id": "Cream",
"token_count": 102
} | 302 |
import torch
import torch.nn as nn
from timm.models.layers import trunc_normal_, to_2tuple, DropPath
from .swin_transformer import Mlp, window_partition, window_reverse, PatchEmbed, PatchMerging
import torch.utils.checkpoint as checkpoint
class WindowAttentionDISTILL(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv_tmp = self.qkv(x)
qkv = qkv_tmp.reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
        # also expose q, k, v with heads merged (each B_ x N x C); these are
        # returned so a distillation loss can match teacher/student attention
        qkv_out = qkv_tmp.reshape(B_, N, 3, C).permute(2, 0, 1, 3)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0) #B, nHead, Wh*Ww, Wh*Ww
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, (qkv_out[0], qkv_out[1], qkv_out[2])
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlockDISTILL(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttentionDISTILL(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows, qkv_tuple = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C; (B x N x C, B x N x C, B x N x C)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, qkv_tuple
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
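# A minimal shape-flow sketch for the block above (illustrative values, not part of
# the original file): assuming a 56x56 feature map with dim=96, num_heads=3,
# window_size=7,
#   blk = SwinTransformerBlockDISTILL(dim=96, input_resolution=(56, 56), num_heads=3)
#   x = torch.randn(2, 56 * 56, 96)   # (B, L, C) with L = H * W
#   out, (q, k, v) = blk(x)           # out: (2, 3136, 96); q, k, v are the per-window
#                                     # projections returned for attention distillation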
class BasicLayerDISTILL(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlockDISTILL(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
qkv_tuple_list = []
hidden_tuple_list = []
for blk in self.blocks:
if self.use_checkpoint:
x, qkv_tuple = checkpoint.checkpoint(blk, x)
else:
x, qkv_tuple = blk(x)
qkv_tuple_list.append(qkv_tuple)
hidden_tuple_list.append(x)
if self.downsample is not None:
x = self.downsample(x)
return x, qkv_tuple_list, hidden_tuple_list
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
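# Note (sketch, not part of the original file): besides the (possibly downsampled)
# feature map, a stage returns one qkv tuple and one pre-downsample hidden state per
# block, so len(qkv_tuple_list) == len(hidden_tuple_list) == depth.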
class SwinTransformerDISTILL(nn.Module):
r""" Swin Transformer for Self-Attention and Hidden-State Distillation.
    The model structure is identical to the official Swin Transformer; it additionally
    returns the self-attention (QKV) tensors and hidden states needed for distillation.
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False,
## The following parameters are for distillation
                 is_student=False,
                 fit_size_C=128,
**kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
print("Dropout path: ", dpr)
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayerDISTILL(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.is_student = is_student
self.fit_size_C = fit_size_C
self.fit_dense_C = nn.ModuleList()
if self.is_student:
for i_layer in range(self.num_layers):
                self.fit_dense_C.append(nn.Linear(int(embed_dim * 2 ** i_layer), int(fit_size_C * 2 ** i_layer)))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x, layer_id_list=[], is_hidden_org=True):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
layer_id = 0
qkv_tuple_return_list = []
hidden_tuple_return_list = []
for id, layer in enumerate(self.layers):
x, qkv_tuple_list, hidden_tuple_list = layer(x)
for index in range(len(qkv_tuple_list)):
if index + layer_id in layer_id_list:
qkv_tuple_return_list.append(qkv_tuple_list[index])
if self.is_student and not is_hidden_org:
hidden_tuple_return_list.append(self.fit_dense_C[id](hidden_tuple_list[index]))
else:
hidden_tuple_return_list.append(hidden_tuple_list[index])
layer_id += len(qkv_tuple_list)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x, qkv_tuple_return_list, hidden_tuple_return_list
    def forward(self, x, layer_id_list=[], is_attn_loss=False, is_hidden_loss=False, is_hidden_org=False):
is_hidden_rel = is_hidden_org
x, qkv_tuple_return_list, hidden_tuple_return_list = self.forward_features(x, layer_id_list, is_hidden_org=is_hidden_rel)
x = self.head(x)
if is_attn_loss and is_hidden_loss:
return x, qkv_tuple_return_list, hidden_tuple_return_list
elif is_attn_loss:
return x, qkv_tuple_return_list
elif is_hidden_loss:
return x, hidden_tuple_return_list
else:
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
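# Usage sketch (illustrative, not part of the original file): extracting attention
# and hidden states for distillation. Block ids index blocks globally across stages;
# with the default depths=[2, 2, 6, 2] there are 12 blocks, so layer_id_list=[9, 11]
# selects the last block of stages 3 and 4.
#   model = SwinTransformerDISTILL(is_student=True, fit_size_C=128)
#   logits, qkv_list, hidden_list = model(
#       torch.randn(2, 3, 224, 224), layer_id_list=[9, 11],
#       is_attn_loss=True, is_hidden_loss=True)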
| Cream/MiniViT/Mini-Swin/models/swin_transformer_distill.py/0 | {
"file_path": "Cream/MiniViT/Mini-Swin/models/swin_transformer_distill.py",
"repo_id": "Cream",
"token_count": 10285
} | 303 |
# Preparation
### Install the dependencies
```bash
pip install -r requirements-training.txt
pip install -v -e .
```
### Data Preparation
We need to prepare the [ImageNet-1k](http://www.image-net.org/) dataset for the zero-shot classification task.
- ImageNet-1k
ImageNet-1k contains 1.28M images for training and 50K images for validation.
The train set and validation set should be saved as `*.tar` archives:
```
ImageNet/
├── train.tar
└── val.tar
```
Our code also supports storing images as individual files, as follows:
```
ImageNet/
├── train
│ ├── n01440764
│ │ ├── n01440764_10026.JPEG
│ │ ├── n01440764_10027.JPEG
...
├── val
│ ├── n01440764
│ │ ├── ILSVRC2012_val_00000293.JPEG
```
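If the images are stored as folders, they can be packed into the expected `*.tar` archives with Python's standard `tarfile` module. The snippet below is only a sketch — the paths are assumptions, and the exact in-archive layout expected by the loader should be verified against your setup:
```python
import tarfile

# Pack each split folder (train/, val/) into a tar archive next to it.
for split in ('train', 'val'):
    with tarfile.open(f'ImageNet/{split}.tar', 'w') as tar:
        tar.add(f'ImageNet/{split}', arcname=split)
```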
| Cream/TinyCLIP/docs/PREPARATION.md/0 | {
"file_path": "Cream/TinyCLIP/docs/PREPARATION.md",
"repo_id": "Cream",
"token_count": 268
} | 304 |
import functools
import logging
import os
import json
import math
import random
from datetime import datetime
import numpy as np
import torch
from torch import optim
import torch.nn.functional as F
from torch.cuda.amp import GradScaler
from open_clip.model import convert_to_new_checkpoint, load_pruned_model
from open_clip.factory import load_model, get_tokenizer
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torchvision")
from open_clip.weight_inherit import weight_inherit
from training.optimizer import build_optimizer
try:
import wandb
except ImportError:
wandb = None
try:
import torch.utils.tensorboard as tensorboard
except ImportError:
tensorboard = None
try:
import horovod.torch as hvd
except ImportError:
hvd = None
from open_clip import create_model_and_transforms, trace_model
from training.data import get_data
from training.distributed import is_master, init_distributed_device, world_info_from_env
from training.logger import setup_logging
from training.params import parse_args
from training.scheduler import cosine_lr, cosine_lr_start, step_lr, cosine_lr_start_nowarmup
from training.train import train_one_epoch, evaluate
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
def compute_params(model):
def _get_params(model):
if model is None:
return 0
n_parameters = sum(p.numel()
for p in model.parameters() if p.requires_grad)
return n_parameters
def _get_buffers(model):
if model is None:
return 0
n_parameters = sum(p.numel() for p in model.buffers())
return n_parameters
n_parameters = _get_params(model)
num_params_image = _get_params(model.image_encoder_without_ddp.visual)
num_buffers_image = _get_buffers(model.image_encoder_without_ddp.visual)
num_params_text = _get_params(model.text_encoder_without_ddp.transformer)
num_token_emb = _get_params(model.text_encoder_without_ddp.token_embedding) if \
model.text_encoder_without_ddp.transformer is not None else 0
if model.text_encoder_without_ddp.transformer is not None and \
sum(p.numel() for p in model.text_encoder_without_ddp.transformer.parameters()) > 0:
num_params_text += _get_params(
model.text_encoder_without_ddp.token_embedding)
num_params_text += _get_params(model.text_encoder_without_ddp.ln_final)
num_params_text += (model.text_encoder_without_ddp.positional_embedding.numel() +
model.text_encoder_without_ddp.text_projection.numel())
return n_parameters, (num_params_image, num_buffers_image), num_params_text, num_token_emb
DEVICE = torch.device('cpu')
def _load_checkpoint(name):
global DEVICE
if '@' in name:
teacher_model_name, teacher_pretrained = name.split('@')
_model, _, _ = create_model_and_transforms(
teacher_model_name, pretrained=teacher_pretrained, device=DEVICE)
return _model.state_dict()
json_fname = os.path.join('exps', name + '.json')
if os.path.exists(json_fname):
model_info = json.load(open(json_fname))
name = model_info['resume']
state_dict = torch.load(name, map_location=DEVICE)
if 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
elif 'model' in state_dict:
state_dict = state_dict['model']
return state_dict
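# Dispatch sketch for _load_checkpoint (the names below are illustrative):
#   'ViT-B-32@laion400m_e32' -> build the model via open_clip and take its state_dict
#   'my_exp'                 -> read exps/my_exp.json and load its 'resume' checkpoint
#   '/path/to/ckpt.pt'       -> load the file directly, unwrapping a nested
#                               'state_dict' or 'model' entry if present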
def main():
global DEVICE
args = parse_args()
is_bf16_supported = torch.cuda.is_bf16_supported()
if not is_bf16_supported:
for name in ['precision', 'image_precision', 'text_precision', 'logit_precision']:
if getattr(args, name) == 'amp_bfloat16':
setattr(args, name, 'amp')
if torch.cuda.is_available():
# This enables tf32 on Ampere GPUs which is only 8% slower than
# float16 and almost as accurate as float32
# This was a default in pytorch until 1.12
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
# sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
args.model = args.model.replace('/', '-')
# get the name of the experiments
if args.name is None:
args.name = '-'.join([
datetime.now().strftime("%Y_%m_%d-%H_%M_%S"),
f"model_{args.model}",
f"lr_{args.lr}",
f"b_{args.batch_size}",
f"j_{args.workers}",
f"p_{args.precision}",
])
# discover initial world args early so we can log properly
args.distributed = False
args.local_rank, args.rank, args.world_size = world_info_from_env()
args.log_path = None
if is_master(args, local=args.log_local):
log_base_path = os.path.join(args.logs, args.name)
os.makedirs(log_base_path, exist_ok=True)
log_filename = f'out-{args.rank}' if args.log_local else 'out.log'
args.log_path = os.path.join(log_base_path, log_filename)
        if False and os.path.exists(args.log_path):
            print(
                f"Error. Experiment already exists. Use --name {args.name} to specify a new experiment."
            )
            return -1
# Set logger
args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
# fully initialize distributed device environment
device = init_distributed_device(args)
DEVICE = device
args.wandb = 'wandb' in args.report_to or 'all' in args.report_to
args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to
if is_master(args):
args.tensorboard_path = os.path.join(
args.logs, args.name, "tensorboard") if args.tensorboard else ''
args.checkpoint_path = os.path.join(
args.logs, args.name, "checkpoints")
for dirname in [args.tensorboard_path, args.checkpoint_path]:
if dirname:
os.makedirs(dirname, exist_ok=True)
else:
args.tensorboard_path = ''
args.checkpoint_path = ''
assert args.precision in ['amp', 'amp_bfloat16', 'fp16', 'fp32']
if args.precision == 'fp16':
logging.warning(
'It is recommended to use AMP mixed-precision instead of FP16. '
'FP16 support needs further verification and tuning, especially for train.')
if args.horovod:
logging.info(
f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.'
f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
elif args.distributed:
logging.info(
f'Running in distributed mode with multiple processes. Device: {args.device}.'
f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
else:
logging.info(f'Running with a single process. Device {args.device}.')
random_seed(args.seed, 0)
model, preprocess_train, preprocess_val = create_model_and_transforms(
args.model,
args.pretrained,
# the model will be converted to FP16 if args.precision is fp16
precision=args.precision,
device=device,
jit=args.torchscript,
force_quick_gelu=args.force_quick_gelu,
pretrained_image=args.pretrained_image,
image_mean=args.image_mean,
image_std=args.image_std,
args=args,
)
random_seed(args.seed, args.rank)
if is_master(args, local=args.log_local):
logging.info('train: {}\n val: {}'.format(
preprocess_train, preprocess_val))
n_parameters, (num_params_image,
num_buffers_image), num_params_text, num_token_emb = compute_params(model)
if is_master(args):
logging.info(f"number of params: {n_parameters / 1e6}")
logging.info(f'number of params image: {num_params_image / 1e6}')
logging.info(f'number of buffers image: {num_buffers_image / 1e6}')
logging.info(f'number of params text: {num_params_text / 1e6}')
logging.info(
f'number of token embedding in text encoder : {num_token_emb / 1e6}')
if args.distillation:
teacher_model = load_model(args.distillation_teacher, device=device)
if args.grad_checkpointing:
teacher_model.set_grad_checkpointing()
teacher_model.eval()
teacher_model.cuda()
# frozen parameters
for p in teacher_model.parameters():
p.requires_grad = False
model.teacher = [teacher_model]
else:
teacher_model = None
if args.trace:
model = trace_model(model, batch_size=args.batch_size, device=device)
if args.lock_image:
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
model.lock_image_tower(
unlocked_groups=args.lock_image_unlocked_groups,
freeze_bn_stats=args.lock_image_freeze_bn_stats)
logging.info('Locked image tower.')
if args.lock_text:
model.lock_text_tower()
logging.info('Locked text tower.')
model.cuda()
if args.grad_checkpointing:
model.set_grad_checkpointing()
if is_master(args):
logging.info("Model:")
logging.info(f"{str(model)}")
logging.info("Params:")
params_file = os.path.join(args.logs, args.name, "params.txt")
with open(params_file, "w") as f:
for name in sorted(vars(args)):
val = getattr(args, name)
logging.info(f" {name}: {val}")
f.write(f"{name}: {val}\n")
model_without_ddp = model
# create optimizer and scaler
optimizer = None
scaler = None
if args.train_data:
assert not args.trace, 'Cannot train with traced model'
optimizer = build_optimizer(args, model)
assert not args.horovod
use_loss_scale = any(map(
lambda x: x in ['amp', 'fp16'],
[args.precision, args.image_precision, args.text_precision, args.logit_precision]))
print(f'Use loss scale: {use_loss_scale}')
scaler = GradScaler(enabled=use_loss_scale)
checkpoint_fname_list = [None]
if is_master(args):
if os.path.isdir(args.checkpoint_path):
ckpts_list = []
for name in os.listdir(args.checkpoint_path):
if name.startswith('epoch_') and name.endswith('.pt'):
name = os.path.splitext(name)[0]
name = name[len('epoch_'):]
epoch, it = map(int, name.split('_iter_'))
ckpts_list.append((epoch, it))
if len(ckpts_list) > 0:
ckpts_list.sort(reverse=True)
for epoch, it in ckpts_list:
checkpoint_fname = os.path.join(
args.checkpoint_path, f"epoch_{epoch}_iter_{it}.pt")
try:
# check valid
torch.load(checkpoint_fname, map_location='cpu')
checkpoint_fname_list[0] = checkpoint_fname
break
except Exception as e:
print(f'Load Ckpt Fail: {e}')
torch.distributed.broadcast_object_list(checkpoint_fname_list, src=0)
if checkpoint_fname_list[0] is not None:
print(
f'overwrite checkpoint path: {checkpoint_fname_list[0]}, the original path is {args.resume}')
args.resume = checkpoint_fname_list[0]
    # determine if this worker should save logs and checkpoints. only do so if it is rank == 0
    # optionally resume from a checkpoint
    start_epoch = 0
    start_iter = 0
if args.resume is not None:
        # this part only supports resuming a CLIP model without mask. [TODO]: support resuming a CLIP model with mask.
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume, map_location='cpu')
if args.prune_image and args.prune_text:
sd = checkpoint["state_dict"]
if not args.distributed and next(iter(sd.items()))[0].startswith('module'):
sd = {k[len('module.'):]: v for k, v in sd.items()}
sd = {k.replace('.module', ''): v for k, v in sd.items()}
logging.info('convert pruned model to base')
load_pruned_model(model, sd)
if args.load_last_stage is False:
logging.info('=== FUSE MASK IMAGE ===')
num_params_before_fuse = sum(
p.numel() for p in model.image_encoder_without_ddp.parameters() if p.requires_grad)
with torch.no_grad():
model.image_encoder_without_ddp.eval()
image = torch.randn((1, 3, 224, 224), device='cuda')
model.image_encoder_without_ddp(image)
model.image_encoder_without_ddp = model.image_encoder_without_ddp.prune()
assert hasattr(
model.image_encoder_without_ddp, 'l0_module')
model.image_encoder_without_ddp.l0_module = None
num_params_after_fuse = sum(
p.numel() for p in model.image_encoder_without_ddp.parameters() if p.requires_grad)
logging.info(
f'=> fuse MASK image: {num_params_before_fuse} -> {num_params_after_fuse}')
logging.info('=== FUSE MASK TEXT ===')
num_params_before_fuse = sum(
p.numel() for p in model.text_encoder_without_ddp.parameters() if p.requires_grad)
with torch.no_grad():
model.text_encoder_without_ddp.eval()
text = torch.randint(0, 100, (1, 77), device='cuda')
model.text_encoder_without_ddp(text)
model.text_encoder_without_ddp = model.text_encoder_without_ddp.prune()
assert hasattr(model.text_encoder_without_ddp, 'l0_module')
model.text_encoder_without_ddp.l0_module = None
num_params_after_fuse = sum(
p.numel() for p in model.text_encoder_without_ddp.parameters() if p.requires_grad)
logging.info(
f'=> fuse MASK text: {num_params_before_fuse} -> {num_params_after_fuse}')
args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args)
else:
sd = checkpoint["state_dict"]
new_state_dict = {}
for key, value in sd.items():
if 'logit_scale' in key:
new_key = '_logit_scale.logit_scale'
elif key.startswith('module.visual'):
new_key = key.replace(
'module.visual', '_image_encoder.visual')
elif key.startswith('module'):
new_key = key.replace('module', '_text_encoder')
else:
new_key = key
new_state_dict[new_key] = value
sd = new_state_dict
if not args.distributed and next(iter(sd.items()))[0].startswith('module'):
sd = {k[len('module.'):]: v for k, v in sd.items()}
model.load_state_dict(sd)
if 'epoch' in checkpoint and args.load_last_stage is False:
# resuming a train checkpoint w/ epoch and optimizer state
start_epoch = checkpoint["epoch"]
if optimizer is not None and 'optimizer' in checkpoint and args.load_last_stage is False:
if len(optimizer) == len(checkpoint['optimizer']):
for opt, v in zip(optimizer, checkpoint["optimizer"]):
assert len(opt.param_groups) == len(v['param_groups']), \
f'number of param groups mismatch: {len(opt.param_groups)} vs {len(v["param_groups"])}'
opt.load_state_dict(v)
if scaler is not None and 'scaler' in checkpoint:
scaler.load_state_dict(checkpoint['scaler'])
else:
logging.info(f"optimizer load fails, use new one")
if 'iter_in_epoch' in checkpoint and args.load_last_stage is False:
start_iter = checkpoint['iter_in_epoch'] + 1
logging.info(f"fast_forward dataloader to iter {start_iter}")
else:
raise FileNotFoundError(f'=> no checkpoint found at {args.resume}')
else:
def remove_prefix_module(state_dict):
# remove the first or the second module
return convert_to_new_checkpoint(state_dict)
def add_prefix_module(state_dict):
if all(map(lambda x: not x.startswith('module.'), state_dict.keys())):
return {'module.' + k: v for k, v in state_dict.items()}
return state_dict
def model_load_checkpoint(model, state_dict):
if hasattr(model, 'module'):
state_dict = add_prefix_module(state_dict)
model.load_state_dict(state_dict, strict=True)
def encoder_weight_inherit(student_state, teacher_state, encoder_prefix, head_dim):
def _filter_prefix(state, prefix):
return dict((k, v) for k, v in state.items() if k.startswith(prefix) and 'l0_module' not in k)
student_fs = _filter_prefix(student_state, encoder_prefix)
teacher_fs = _filter_prefix(teacher_state, encoder_prefix)
logging.info(
f' student: {len(student_fs)}, teacher: {len(teacher_fs)}')
weight_inherit(student_fs, teacher_fs, head_dim)
num = 0
for k, v in student_fs.items():
num += v.numel()
student_state[k] = v
return num
if args.pretrained_image_file:
logging.info('=== INHERIT IMAGE ===')
# no resume, try to load image file
state_dict = remove_prefix_module(model.state_dict())
# ckpt
image_checkpoint = remove_prefix_module(
_load_checkpoint(args.pretrained_image_file))
num_inherit = encoder_weight_inherit(
state_dict, image_checkpoint, '_image_encoder.visual', head_dim=model.visual.head_dim)
# format: _image_encoder.xxxx
model_load_checkpoint(model, state_dict)
assert num_inherit == num_params_image + \
num_buffers_image, (num_inherit,
num_params_image, num_buffers_image)
logging.info(
f'=> loaded image checkpoint {args.pretrained_image_file} ({num_inherit} image params)')
if args.pretrained_text_file:
logging.info('=== INHERIT TEXT ===')
# student with ddp
state_dict = remove_prefix_module(model.state_dict())
# teacher without ddp
text_checkpoint = remove_prefix_module(
_load_checkpoint(args.pretrained_text_file))
# format: _text_encoder.xxxx
num_inherit = encoder_weight_inherit(
state_dict, text_checkpoint, '_text_encoder', head_dim=model.transformer.head_dim)
assert num_inherit == num_params_text, (
num_inherit, num_params_text)
logging.info(
f'=> loaded text checkpoint {args.pretrained_text_file} ({num_inherit} text params)')
model_load_checkpoint(model, state_dict)
if args.distributed and not args.horovod:
ddp_args = {}
if args.ddp_static_graph:
# this doesn't exist in older PyTorch, arg only added if enabled
ddp_args['static_graph'] = True
ddp_fn = functools.partial(
torch.nn.parallel.DistributedDataParallel, device_ids=[device], **ddp_args)
# re-ddpify
model.ddpify(ddp_fn)
# initialize datasets
data = get_data(args, (preprocess_train, preprocess_val),
epoch=start_epoch, tokenizer=get_tokenizer(args.model))
print(f"Dataset: {set(data.keys())}")
assert len(data), 'At least one train or eval dataset must be specified.'
args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args)
writer = None
if args.save_logs and args.tensorboard:
assert tensorboard is not None, "Please install tensorboard."
writer = tensorboard.SummaryWriter(args.tensorboard_path)
if args.wandb and is_master(args):
assert wandb is not None, 'Please install wandb.'
logging.debug('Starting wandb.')
args.train_sz = data["train"].dataloader.num_samples
if args.val_data is not None:
args.val_sz = data["val"].dataloader.num_samples
# you will have to configure this for your project!
wandb_output_path = args.checkpoint_path
wandb.init(
project="tinyclip",
name=args.name,
notes=args.wandb_notes,
tags=[],
config=vars(args),
dir=wandb_output_path,
)
if args.debug:
wandb.watch(model, log='all')
wandb.save(params_file)
logging.debug('Finished loading wandb.')
# create scheduler if train
scheduler = None
if 'train' in data and optimizer is not None:
total_steps = data["train"].dataloader.num_batches * args.epochs
if args.prune_image or args.prune_text:
scheduler = cosine_lr(
optimizer[0:3], args.lr, args.prune_step, total_steps)
scheduler_l0 = step_lr(optimizer[-1], args.prune_step)
else:
scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)
scheduler_l0 = None
if 'train' not in data or args.eval:
results = evaluate(model, data, start_epoch, args, writer)
if is_master(args):
print(results)
return
for epoch in range(start_epoch, math.ceil(args.epochs)):
if is_master(args):
logging.info(f'Start epoch {epoch}')
rtn = train_one_epoch(model, data, epoch, optimizer, scaler,
scheduler, scheduler_l0, args, writer, start_iter)
if isinstance(rtn, str) and rtn == 'non-finite loss':
break
else:
model, optimizer, scaler, scheduler, scheduler_l0, args = rtn
start_iter = 0
if args.wandb and is_master(args):
wandb.finish()
def copy_codebase(args):
from shutil import copytree, ignore_patterns
new_code_path = os.path.join(args.logs, args.name, "code")
if False and os.path.exists(new_code_path):
print(
f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
)
return -1
print(f"Copying codebase to {new_code_path}")
current_code_path = os.path.realpath(__file__)
for _ in range(3):
current_code_path = os.path.dirname(current_code_path)
copytree(current_code_path, new_code_path,
ignore=ignore_patterns('log', 'logs', 'wandb'))
print("Done copying code.")
return 1
if __name__ == "__main__":
main()
| Cream/TinyCLIP/src/training/main.py/0 | {
"file_path": "Cream/TinyCLIP/src/training/main.py",
"repo_id": "Cream",
"token_count": 11089
} | 305 |
# --------------------------------------------------------
# TinyViT Config
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Adapted for TinyViT
# --------------------------------------------------------
import os
import yaml
from yacs.config import CfgNode as CN
_C = CN()
# Base config files
_C.BASE = ['']
# -----------------------------------------------------------------------------
# Data settings
# -----------------------------------------------------------------------------
_C.DATA = CN()
# Batch size for a single GPU, could be overwritten by command line argument
_C.DATA.BATCH_SIZE = 128
# Path to dataset, could be overwritten by command line argument
_C.DATA.DATA_PATH = ''
# Dataset name
_C.DATA.DATASET = 'imagenet'
# Dataset mean/std type
_C.DATA.MEAN_AND_STD_TYPE = "default"
# Input image size
_C.DATA.IMG_SIZE = 224
# Interpolation to resize image (random, bilinear, bicubic)
_C.DATA.INTERPOLATION = 'bicubic'
# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.DATA.PIN_MEMORY = True
# Number of data loading threads
_C.DATA.NUM_WORKERS = 8
# Data image filename format
_C.DATA.FNAME_FORMAT = '{}.jpeg'
# Data debug: when True, only a few images are used (for debugging)
_C.DATA.DEBUG = False
# -----------------------------------------------------------------------------
# Model settings
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model type
_C.MODEL.TYPE = 'tiny_vit'
# Model name
_C.MODEL.NAME = 'tiny_vit'
# Pretrained weight from checkpoint, could be imagenet22k pretrained weight
# could be overwritten by command line argument
_C.MODEL.PRETRAINED = ''
# Checkpoint to resume, could be overwritten by command line argument
_C.MODEL.RESUME = ''
# Number of classes, overwritten in data preparation
_C.MODEL.NUM_CLASSES = 1000
# Dropout rate
_C.MODEL.DROP_RATE = 0.0
# Drop path rate
_C.MODEL.DROP_PATH_RATE = 0.1
# Label Smoothing
_C.MODEL.LABEL_SMOOTHING = 0.1
# TinyViT Model
_C.MODEL.TINY_VIT = CN()
_C.MODEL.TINY_VIT.IN_CHANS = 3
_C.MODEL.TINY_VIT.DEPTHS = [2, 2, 6, 2]
_C.MODEL.TINY_VIT.NUM_HEADS = [3, 6, 12, 18]
_C.MODEL.TINY_VIT.WINDOW_SIZES = [7, 7, 14, 7]
_C.MODEL.TINY_VIT.EMBED_DIMS = [96, 192, 384, 576]
_C.MODEL.TINY_VIT.MLP_RATIO = 4.
_C.MODEL.TINY_VIT.MBCONV_EXPAND_RATIO = 4.0
_C.MODEL.TINY_VIT.LOCAL_CONV_SIZE = 3
# DISTILL
_C.DISTILL = CN()
_C.DISTILL.ENABLED = False
_C.DISTILL.TEACHER_LOGITS_PATH = ''
_C.DISTILL.SAVE_TEACHER_LOGITS = False
_C.DISTILL.LOGITS_TOPK = 100
# -----------------------------------------------------------------------------
# Training settings
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.START_EPOCH = 0
_C.TRAIN.EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_LR = 5e-7
_C.TRAIN.MIN_LR = 5e-6
# Clip gradient norm
_C.TRAIN.CLIP_GRAD = 5.0
# Auto resume from latest checkpoint
_C.TRAIN.AUTO_RESUME = True
# Gradient accumulation steps
# could be overwritten by command line argument
_C.TRAIN.ACCUMULATION_STEPS = 1
# Whether to use gradient checkpointing to save memory
# could be overwritten by command line argument
_C.TRAIN.USE_CHECKPOINT = False
# train learning rate decay
_C.TRAIN.LAYER_LR_DECAY = 1.0
# batch norm is in evaluation mode when training
_C.TRAIN.EVAL_BN_WHEN_TRAINING = False
# LR scheduler
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
# Epoch interval to decay LR, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
# LR decay rate, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
# Optimizer
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'adamw'
# Optimizer Epsilon
_C.TRAIN.OPTIMIZER.EPS = 1e-8
# Optimizer Betas
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
# SGD momentum
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# -----------------------------------------------------------------------------
# Augmentation settings
# -----------------------------------------------------------------------------
_C.AUG = CN()
# Color jitter factor
_C.AUG.COLOR_JITTER = 0.4
# AutoAugment / RandAugment policy. "v0"/"original" select AutoAugment; "rand-*" strings select RandAugment
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
# Random erase prob
_C.AUG.REPROB = 0.25
# Random erase mode
_C.AUG.REMODE = 'pixel'
# Random erase count
_C.AUG.RECOUNT = 1
# Mixup alpha, mixup enabled if > 0
_C.AUG.MIXUP = 0.8
# Cutmix alpha, cutmix enabled if > 0
_C.AUG.CUTMIX = 1.0
# Cutmix min/max ratio, overrides alpha and enables cutmix if set
_C.AUG.CUTMIX_MINMAX = None
# Probability of performing mixup or cutmix when either/both is enabled
_C.AUG.MIXUP_PROB = 1.0
# Probability of switching to cutmix when both mixup and cutmix enabled
_C.AUG.MIXUP_SWITCH_PROB = 0.5
# How to apply mixup/cutmix params. Per "batch", "pair", or "elem"
_C.AUG.MIXUP_MODE = 'batch'
# -----------------------------------------------------------------------------
# Testing settings
# -----------------------------------------------------------------------------
_C.TEST = CN()
# Whether to use center crop when testing
_C.TEST.CROP = True
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
# Enable Pytorch automatic mixed precision (amp).
_C.AMP_ENABLE = True
# Path to output folder, overwritten by command line argument
_C.OUTPUT = ''
# Tag of experiment, overwritten by command line argument
_C.TAG = 'default'
# Frequency to save checkpoint
_C.SAVE_FREQ = 1
# Frequency to logging info
_C.PRINT_FREQ = 10
# Fixed random seed
_C.SEED = 0
# Perform evaluation only, overwritten by command line argument
_C.EVAL_MODE = False
# Test throughput only, overwritten by command line argument
_C.THROUGHPUT_MODE = False
# local rank for DistributedDataParallel, given by command line argument
_C.LOCAL_RANK = 0
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('=> merge config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
_update_config_from_file(config, args.cfg)
config.defrost()
if args.opts:
config.merge_from_list(args.opts)
# merge from specific arguments
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.pretrained:
config.MODEL.PRETRAINED = args.pretrained
if args.resume:
config.MODEL.RESUME = args.resume
if args.accumulation_steps:
config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
if args.use_checkpoint:
config.TRAIN.USE_CHECKPOINT = True
if args.disable_amp or args.only_cpu:
config.AMP_ENABLE = False
if args.output:
config.OUTPUT = args.output
if args.tag:
config.TAG = args.tag
if args.eval:
config.EVAL_MODE = True
if args.throughput:
config.THROUGHPUT_MODE = True
# set local rank for distributed training
if args.local_rank is None and 'LOCAL_RANK' in os.environ:
args.local_rank = int(os.environ['LOCAL_RANK'])
# set local rank for distributed training
config.LOCAL_RANK = args.local_rank
# output folder
config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG)
config.freeze()
def get_config(args=None):
"""Get a yacs CfgNode object with default values."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
config = _C.clone()
if args is not None:
update_config(config, args)
return config
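# Usage sketch (illustrative; the config file name and flag values are assumptions):
#   from argparse import Namespace
#   args = Namespace(cfg='configs/tiny_vit_5m_224.yaml', opts=None, batch_size=64,
#                    data_path='./ImageNet', pretrained=None, resume=None,
#                    accumulation_steps=None, use_checkpoint=False,
#                    disable_amp=False, only_cpu=False, output='output', tag=None,
#                    eval=False, throughput=False, local_rank=0)
#   config = get_config(args)
#   print(config.DATA.BATCH_SIZE)  # 64, overridden by the command-line argument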
| Cream/TinyViT/config.py/0 | {
"file_path": "Cream/TinyViT/config.py",
"repo_id": "Cream",
"token_count": 2954
} | 306 |
import numpy as np
from numpy.random import Generator, PCG64
RNG = None
class AugRandomContext:
def __init__(self, seed):
self.seed = seed
def __enter__(self):
global RNG
assert RNG is None
RNG = Generator(PCG64(seed=self.seed))
def __exit__(self, *_):
global RNG
RNG = None
class random:
    # drop-in subset of the stdlib `random` module, driven by the context-seeded RNG
@staticmethod
def random():
return RNG.random()
@staticmethod
def uniform(a, b):
return random.random() * (b - a) + a
@staticmethod
def randint(a, b):
# [low, high]
return min(int(random.random() * (b - a + 1)) + a, b)
@staticmethod
def gauss(mu, sigma):
return RNG.normal(mu, sigma)
class np_random:
# numpy.random
@staticmethod
def choice(a, size, *args, **kwargs):
return RNG.choice(a, size, *args, **kwargs)
@staticmethod
def randint(low, high, size=None, dtype=int):
# [low, high)
if size is None:
return dtype(random.randint(low, high - 1))
out = [random.randint(low, high - 1) for _ in range(size)]
return np.array(out, dtype=dtype)
@staticmethod
def rand(*shape):
return RNG.random(shape)
@staticmethod
def beta(a, b, size=None):
return RNG.beta(a, b, size=size)
if __name__ == '__main__':
for _ in range(2):
with AugRandomContext(seed=0):
print(np_random.randint(-100, 100, size=10))
with AugRandomContext(seed=1):
print(np_random.randint(-100, 100, size=10))
| Cream/TinyViT/data/augmentation/aug_random.py/0 | {
"file_path": "Cream/TinyViT/data/augmentation/aug_random.py",
"repo_id": "Cream",
"token_count": 724
} | 307 |
import os
from .parser_image_folder import ParserImageFolder
from .parser_image_tar import ParserImageTar
from .parser_image_in_tar import ParserImageInTar
def create_parser(name, root, split='train', **kwargs):
name = name.lower()
name = name.split('/', 2)
prefix = ''
if len(name) > 1:
prefix = name[0]
name = name[-1]
# FIXME improve the selection right now just tfds prefix or fallback path, will need options to
# explicitly select other options shortly
if prefix == 'tfds':
from .parser_tfds import ParserTfds # defer tensorflow import
parser = ParserTfds(root, name, split=split, **kwargs)
else:
assert os.path.exists(root)
# default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder
# FIXME support split here, in parser?
if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
parser = ParserImageInTar(root, **kwargs)
else:
parser = ParserImageFolder(root, **kwargs)
return parser
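# Usage sketch (illustrative paths): the parser is chosen from the name prefix or
# from the type of the root path.
#   parser = create_parser('', '/data/imagenet/train')       # folder of images
#   parser = create_parser('', '/data/imagenet/train.tar')   # single tar archive
#   parser = create_parser('tfds/imagenet2012', '/data/tfds', split='train')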
| Cream/TinyViT/data/augmentation/parsers/parser_factory.py/0 | {
"file_path": "Cream/TinyViT/data/augmentation/parsers/parser_factory.py",
"repo_id": "Cream",
"token_count": 410
} | 308 |
# Tutorial: saving teacher sparse logits
This document shows how to save and check teacher sparse soft labels.
We provide an example that stores the sparse soft labels of **CLIP-ViT-Large/14-22k** on ImageNet-22k. With this pretrained teacher, **TinyViT-5/11/21M** achieve a Top-1 accuracy of **80.7/83.2/84.8%** on the ImageNet-1k validation set.
## Save teacher sparse logits
First, prepare the IN-22k dataset ([Data Preparation](./PREPARATION.md)), then download the CLIP-ViT-Large/14-22k checkpoint from [this link](https://github.com/wkcn/TinyViT-model-zoo/releases/download/pretrained_teacher/clip_vit_large_patch14_22k.pth).
The following command will store the teacher sparse logits.
```bash
python -m torch.distributed.launch --nproc_per_node 8 save_logits.py --cfg configs/teacher/clip_vit_large_patch14_22k.yaml --data-path ./ImageNet-22k --batch-size 128 --eval --resume checkpoints/clip_vit_large_patch14_22k.pth --opts DISTILL.TEACHER_LOGITS_PATH ./teacher_logits/
```
**The accuracy of CLIP-ViT-Large/14-22k (w/o finetune on IN-1k) on IN-1k is Acc@1 85.894 Acc@5 97.566.**
Since IN-22k is very large, we recommend debugging with a small subset of the data by adding the argument `DATA.DEBUG True`.
- How to save sparse logits **in parallel**?
Since the teacher logits for each epoch are independent, they can be saved in parallel: each machine saves its own segment of the epoch range.
We can add the epoch interval into the command, e.g.
```bash
python -m torch.distributed.launch --nproc_per_node 8 save_logits.py --cfg configs/teacher/clip_vit_large_patch14_22k.yaml --data-path ./ImageNet-22k --batch-size 128 --eval --resume checkpoints/clip_vit_large_patch14_22k.pth --opts DISTILL.TEACHER_LOGITS_PATH ./teacher_logits/ TRAIN.START_EPOCH 30 TRAIN.EPOCHS 40
```
The sparse logits for epochs 30 through 39 (`TRAIN.START_EPOCH` inclusive, `TRAIN.EPOCHS` exclusive) will be saved.
## Check teacher sparse logits
After saving the logits, we can check them by adding the extra argument `--check-saved-logits`.
```bash
python -m torch.distributed.launch --nproc_per_node 8 save_logits.py --cfg configs/teacher/clip_vit_large_patch14_22k.yaml --data-path ./ImageNet-22k --batch-size 128 --eval --resume checkpoints/clip_vit_large_patch14_22k.pth --check-saved-logits --opts DISTILL.TEACHER_LOGITS_PATH ./teacher_logits
```
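For reference, a sparse soft label keeps only the top-K teacher logits (`DISTILL.LOGITS_TOPK`, 100 by default) together with their class indices. The sketch below shows one way to expand such a pair back into a dense probability vector; the tensor names and the on-disk format are assumptions, not the repository's actual storage layout:
```python
import torch

def dense_soft_label(values, indices, num_classes=21841, temperature=1.0):
    """values/indices: (K,) top-K teacher logits and their class ids."""
    probs = torch.softmax(values / temperature, dim=-1)  # renormalize over the top-K
    dense = torch.zeros(num_classes)
    dense[indices] = probs
    return dense
```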
| Cream/TinyViT/docs/SAVE_TEACHER_LOGITS.md/0 | {
"file_path": "Cream/TinyViT/docs/SAVE_TEACHER_LOGITS.md",
"repo_id": "Cream",
"token_count": 780
} | 309 |
import unittest
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import torch
import timm
from models import tiny_vit
class ModelsTestCase(unittest.TestCase):
"""Test for models.py"""
def setUp(self):
self.ckpt_names = [
('tiny_vit_5m_224', ['22k_distill', '22kto1k_distill', '1k']),
('tiny_vit_11m_224', ['22k_distill', '22kto1k_distill', '1k']),
('tiny_vit_21m_224', ['22k_distill', '22kto1k_distill', '1k']),
('tiny_vit_21m_384', ['22kto1k_distill']),
('tiny_vit_21m_512', ['22kto1k_distill']),
]
def test_load_model(self):
"""Test for load_model"""
for variant, pretrained_types in self.ckpt_names:
# empty load
with self.subTest(variant=variant, pretrained_type='empty'):
model = timm.create_model(variant)
assert model.head.weight.shape[0] == 1000
# load pretrained
for pretrained_type in pretrained_types:
with self.subTest(variant=variant, pretrained_type=pretrained_type):
pretrained_num_classes = 21841 if pretrained_type == '22k_distill' else 1000
model = timm.create_model(variant, pretrained=True, num_classes=pretrained_num_classes)
assert model.head.weight.shape[0] == pretrained_num_classes
model = timm.create_model(variant, pretrained=True, pretrained_type=pretrained_type)
assert model.head.weight.shape[0] == pretrained_num_classes
def test_finetune(self):
pretrained_num_classes = 1000
finetune_num_classes = 100
model1 = timm.create_model('tiny_vit_5m_224', pretrained=True, pretrained_type='22kto1k_distill')
model2 = timm.create_model('tiny_vit_5m_224', pretrained=True, pretrained_type='22kto1k_distill',
num_classes=finetune_num_classes)
state_dict_1 = model1.state_dict()
state_dict_2 = model2.state_dict()
keys = list(state_dict_1.keys())
head_keys = ['head.weight', 'head.bias']
for name in head_keys:
self.assertEqual(state_dict_1.pop(name).shape[0], pretrained_num_classes)
self.assertEqual(state_dict_2.pop(name).shape[0], finetune_num_classes)
for key in keys:
if key not in head_keys:
self.assertTrue(torch.equal(state_dict_1[key], state_dict_2[key]))
def test_forward(self):
for variant, _ in self.ckpt_names:
with self.subTest(variant=variant):
model = timm.create_model(variant)
img_size = int(variant.split('_')[-1])
img = torch.randn(1, 3, img_size, img_size)
out = model(img)
assert out.shape[-1] == 1000
if __name__ == '__main__':
unittest.main()
| Cream/TinyViT/tests/test_models.py/0 | {
"file_path": "Cream/TinyViT/tests/test_models.py",
"repo_id": "Cream",
"token_count": 1413
} | 310 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer
class DETR(nn.Module):
""" This is the DETR module that performs object detection """
def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
            num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
def forward(self, samples: NestedTensor):
""" The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(src_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
'masks': self.loss_masks
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
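# Training-step sketch (mirrors how this criterion is typically consumed; the
# variable names are illustrative):
#   outputs = model(samples)
#   loss_dict = criterion(outputs, targets)
#   loss = sum(loss_dict[k] * criterion.weight_dict[k]
#              for k in loss_dict if k in criterion.weight_dict)
#   loss.backward()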
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augmentation, but before padding
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
return results
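# Post-processing sketch (illustrative): target_sizes holds (height, width) per image.
#   postprocessors = {'bbox': PostProcess()}
#   orig_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)  # (B, 2)
#   results = postprocessors['bbox'](outputs, orig_sizes)
#   # -> list of {'scores', 'labels', 'boxes'} dicts in absolute xyxy coordinates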
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def build(args):
# the `num_classes` naming here is somewhat misleading.
# it indeed corresponds to `max_obj_id + 1`, where max_obj_id
# is the maximum id for a class in your dataset. For example,
# COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
# As another example, for a dataset that has a single class with id 1,
# you should pass `num_classes` to be 2 (max_obj_id + 1).
# For more details on this, check the following discussion
# https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
num_classes = 20 if args.dataset_file != 'coco' else 91
if args.dataset_file == "coco_panoptic":
# for panoptic, we just add a num_classes that is large enough to hold
# max_obj_id + 1, but the exact value doesn't really matter
num_classes = 250
device = torch.device(args.device)
backbone = build_backbone(args)
transformer = build_transformer(args)
model = DETR(
backbone,
transformer,
num_classes=num_classes,
num_queries=args.num_queries,
aux_loss=args.aux_loss,
)
if args.masks:
model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
matcher = build_matcher(args)
weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
weight_dict['loss_giou'] = args.giou_loss_coef
if args.masks:
weight_dict["loss_mask"] = args.mask_loss_coef
weight_dict["loss_dice"] = args.dice_loss_coef
# TODO this is a hack
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ['labels', 'boxes', 'cardinality']
if args.masks:
losses += ["masks"]
criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
eos_coef=args.eos_coef, losses=losses)
criterion.to(device)
postprocessors = {'bbox': PostProcess()}
if args.masks:
postprocessors['segm'] = PostProcessSegm()
if args.dataset_file == "coco_panoptic":
is_thing_map = {i: i <= 90 for i in range(201)}
postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
return model, criterion, postprocessors
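# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Shows how PostProcess maps raw model outputs to COCO-style results. The
# shapes below (batch=2, 100 queries, 92 classes) are assumptions for the demo.
if __name__ == '__main__':
    dummy_outputs = {
        'pred_logits': torch.randn(2, 100, 92),     # class logits; last index is "no object"
        'pred_boxes': torch.rand(2, 100, 4),        # cxcywh boxes in [0, 1]
    }
    sizes = torch.tensor([[480, 640], [600, 800]])  # (height, width) of each original image
    results = PostProcess()(dummy_outputs, sizes)
    assert len(results) == 2 and results[0]['boxes'].shape == (100, 4)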
| Cream/iRPE/DETR-with-iRPE/models/detr.py/0 | {
"file_path": "Cream/iRPE/DETR-with-iRPE/models/detr.py",
"repo_id": "Cream",
"token_count": 7219
} | 311 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| Cream/iRPE/DETR-with-iRPE/util/__init__.py/0 | {
"file_path": "Cream/iRPE/DETR-with-iRPE/util/__init__.py",
"repo_id": "Cream",
"token_count": 17
} | 312 |
from functools import partial
from itertools import repeat
try:
    from torch._six import container_abcs
except ImportError:  # `torch._six` was removed in newer PyTorch releases
    import collections.abc as container_abcs
import logging
import os
from collections import OrderedDict
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from einops.layers.torch import Rearrange
from timm.models.layers import DropPath, trunc_normal_
from .registry import register_model
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class Mlp(nn.Module):
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self,
dim_in,
dim_out,
num_heads,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
method='dw_bn',
kernel_size=3,
stride_kv=1,
stride_q=1,
padding_kv=1,
padding_q=1,
with_cls_token=True,
**kwargs
):
super().__init__()
self.stride_kv = stride_kv
self.stride_q = stride_q
self.dim = dim_out
self.num_heads = num_heads
# head_dim = self.qkv_dim // num_heads
self.scale = dim_out ** -0.5
self.with_cls_token = with_cls_token
self.conv_proj_q = self._build_projection(
dim_in, dim_out, kernel_size, padding_q,
stride_q, 'linear' if method == 'avg' else method
)
self.conv_proj_k = self._build_projection(
dim_in, dim_out, kernel_size, padding_kv,
stride_kv, method
)
self.conv_proj_v = self._build_projection(
dim_in, dim_out, kernel_size, padding_kv,
stride_kv, method
)
self.proj_q = nn.Linear(dim_in, dim_out, bias=qkv_bias)
self.proj_k = nn.Linear(dim_in, dim_out, bias=qkv_bias)
self.proj_v = nn.Linear(dim_in, dim_out, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim_out, dim_out)
self.proj_drop = nn.Dropout(proj_drop)
def _build_projection(self,
dim_in,
dim_out,
kernel_size,
padding,
stride,
method):
if method == 'dw_bn':
proj = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(
dim_in,
dim_in,
kernel_size=kernel_size,
padding=padding,
stride=stride,
bias=False,
groups=dim_in
)),
('bn', nn.BatchNorm2d(dim_in)),
('rearrage', Rearrange('b c h w -> b (h w) c')),
]))
elif method == 'avg':
proj = nn.Sequential(OrderedDict([
('avg', nn.AvgPool2d(
kernel_size=kernel_size,
padding=padding,
stride=stride,
ceil_mode=True
)),
('rearrage', Rearrange('b c h w -> b (h w) c')),
]))
elif method == 'linear':
proj = None
else:
raise ValueError('Unknown method ({})'.format(method))
return proj
def forward_conv(self, x, h, w):
if self.with_cls_token:
cls_token, x = torch.split(x, [1, h*w], 1)
x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
if self.conv_proj_q is not None:
q = self.conv_proj_q(x)
else:
q = rearrange(x, 'b c h w -> b (h w) c')
if self.conv_proj_k is not None:
k = self.conv_proj_k(x)
else:
k = rearrange(x, 'b c h w -> b (h w) c')
if self.conv_proj_v is not None:
v = self.conv_proj_v(x)
else:
v = rearrange(x, 'b c h w -> b (h w) c')
if self.with_cls_token:
q = torch.cat((cls_token, q), dim=1)
k = torch.cat((cls_token, k), dim=1)
v = torch.cat((cls_token, v), dim=1)
return q, k, v
def forward(self, x, h, w):
if (
self.conv_proj_q is not None
or self.conv_proj_k is not None
or self.conv_proj_v is not None
):
q, k, v = self.forward_conv(x, h, w)
q = rearrange(self.proj_q(q), 'b t (h d) -> b h t d', h=self.num_heads)
k = rearrange(self.proj_k(k), 'b t (h d) -> b h t d', h=self.num_heads)
v = rearrange(self.proj_v(v), 'b t (h d) -> b h t d', h=self.num_heads)
attn_score = torch.einsum('bhlk,bhtk->bhlt', [q, k]) * self.scale
attn = F.softmax(attn_score, dim=-1)
attn = self.attn_drop(attn)
x = torch.einsum('bhlt,bhtv->bhlv', [attn, v])
x = rearrange(x, 'b h t d -> b t (h d)')
x = self.proj(x)
x = self.proj_drop(x)
return x
@staticmethod
def compute_macs(module, input, output):
# T: num_token
# S: num_token
input = input[0]
flops = 0
_, T, C = input.shape
H = W = int(np.sqrt(T-1)) if module.with_cls_token else int(np.sqrt(T))
H_Q = H / module.stride_q
        W_Q = W / module.stride_q
T_Q = H_Q * W_Q + 1 if module.with_cls_token else H_Q * W_Q
H_KV = H / module.stride_kv
W_KV = W / module.stride_kv
T_KV = H_KV * W_KV + 1 if module.with_cls_token else H_KV * W_KV
# C = module.dim
# S = T
# Scaled-dot-product macs
# [B x T x C] x [B x C x T] --> [B x T x S]
# multiplication-addition is counted as 1 because operations can be fused
flops += T_Q * T_KV * module.dim
# [B x T x S] x [B x S x C] --> [B x T x C]
flops += T_Q * module.dim * T_KV
if (
hasattr(module, 'conv_proj_q')
and hasattr(module.conv_proj_q, 'conv')
):
params = sum(
[
p.numel()
for p in module.conv_proj_q.conv.parameters()
]
)
flops += params * H_Q * W_Q
if (
hasattr(module, 'conv_proj_k')
and hasattr(module.conv_proj_k, 'conv')
):
params = sum(
[
p.numel()
for p in module.conv_proj_k.conv.parameters()
]
)
flops += params * H_KV * W_KV
if (
hasattr(module, 'conv_proj_v')
and hasattr(module.conv_proj_v, 'conv')
):
params = sum(
[
p.numel()
for p in module.conv_proj_v.conv.parameters()
]
)
flops += params * H_KV * W_KV
params = sum([p.numel() for p in module.proj_q.parameters()])
flops += params * T_Q
params = sum([p.numel() for p in module.proj_k.parameters()])
flops += params * T_KV
params = sum([p.numel() for p in module.proj_v.parameters()])
flops += params * T_KV
params = sum([p.numel() for p in module.proj.parameters()])
flops += params * T
module.__flops__ += flops
class Block(nn.Module):
def __init__(self,
dim_in,
dim_out,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
**kwargs):
super().__init__()
self.with_cls_token = kwargs['with_cls_token']
self.norm1 = norm_layer(dim_in)
self.attn = Attention(
dim_in, dim_out, num_heads, qkv_bias, attn_drop, drop,
**kwargs
)
self.drop_path = DropPath(drop_path) \
if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim_out)
dim_mlp_hidden = int(dim_out * mlp_ratio)
self.mlp = Mlp(
in_features=dim_out,
hidden_features=dim_mlp_hidden,
act_layer=act_layer,
drop=drop
)
def forward(self, x, h, w):
res = x
x = self.norm1(x)
attn = self.attn(x, h, w)
x = res + self.drop_path(attn)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConvEmbed(nn.Module):
""" Image to Conv Embedding
"""
def __init__(self,
patch_size=7,
in_chans=3,
embed_dim=64,
stride=4,
padding=2,
norm_layer=None):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.proj = nn.Conv2d(
in_chans, embed_dim,
kernel_size=patch_size,
stride=stride,
padding=padding
)
self.norm = norm_layer(embed_dim) if norm_layer else None
def forward(self, x):
x = self.proj(x)
B, C, H, W = x.shape
x = rearrange(x, 'b c h w -> b (h w) c')
if self.norm:
x = self.norm(x)
x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W)
return x
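# --- Hedged shape check (illustrative comment; not part of the original file) ---
# >>> embed = ConvEmbed(patch_size=7, in_chans=3, embed_dim=64, stride=4, padding=2)
# >>> embed(torch.randn(1, 3, 224, 224)).shape
# torch.Size([1, 64, 56, 56])   # (224 + 2*2 - 7) // 4 + 1 == 56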
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
patch_size=16,
patch_stride=16,
patch_padding=0,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
init='trunc_norm',
**kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.rearrage = None
self.patch_embed = ConvEmbed(
# img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
stride=patch_stride,
padding=patch_padding,
embed_dim=embed_dim,
norm_layer=norm_layer
)
with_cls_token = kwargs['with_cls_token']
if with_cls_token:
self.cls_token = nn.Parameter(
torch.zeros(1, 1, embed_dim)
)
else:
self.cls_token = None
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
blocks = []
for j in range(depth):
blocks.append(
Block(
dim_in=embed_dim,
dim_out=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[j],
act_layer=act_layer,
norm_layer=norm_layer,
**kwargs
)
)
self.blocks = nn.ModuleList(blocks)
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
if init == 'xavier':
self.apply(self._init_weights_xavier)
else:
self.apply(self._init_weights_trunc_normal)
def _init_weights_trunc_normal(self, m):
if isinstance(m, nn.Linear):
logging.info('=> init weight of Linear from trunc norm')
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
logging.info('=> init bias of Linear to zeros')
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def _init_weights_xavier(self, m):
if isinstance(m, nn.Linear):
logging.info('=> init weight of Linear from xavier uniform')
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
logging.info('=> init bias of Linear to zeros')
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.patch_embed(x)
B, C, H, W = x.size()
x = rearrange(x, 'b c h w -> b (h w) c')
cls_tokens = None
if self.cls_token is not None:
# stole cls_tokens impl from Phil Wang, thanks
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self.pos_drop(x)
for i, blk in enumerate(self.blocks):
x = blk(x, H, W)
if self.cls_token is not None:
cls_tokens, x = torch.split(x, [1, H*W], 1)
x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W)
return x, cls_tokens
class ConvolutionalVisionTransformer(nn.Module):
def __init__(self,
in_chans=3,
num_classes=1000,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
init='trunc_norm',
spec=None):
super().__init__()
self.num_classes = num_classes
self.num_stages = spec['NUM_STAGES']
for i in range(self.num_stages):
kwargs = {
'patch_size': spec['PATCH_SIZE'][i],
'patch_stride': spec['PATCH_STRIDE'][i],
'patch_padding': spec['PATCH_PADDING'][i],
'embed_dim': spec['DIM_EMBED'][i],
'depth': spec['DEPTH'][i],
'num_heads': spec['NUM_HEADS'][i],
'mlp_ratio': spec['MLP_RATIO'][i],
'qkv_bias': spec['QKV_BIAS'][i],
'drop_rate': spec['DROP_RATE'][i],
'attn_drop_rate': spec['ATTN_DROP_RATE'][i],
'drop_path_rate': spec['DROP_PATH_RATE'][i],
'with_cls_token': spec['CLS_TOKEN'][i],
'method': spec['QKV_PROJ_METHOD'][i],
'kernel_size': spec['KERNEL_QKV'][i],
'padding_q': spec['PADDING_Q'][i],
'padding_kv': spec['PADDING_KV'][i],
'stride_kv': spec['STRIDE_KV'][i],
'stride_q': spec['STRIDE_Q'][i],
}
stage = VisionTransformer(
in_chans=in_chans,
init=init,
act_layer=act_layer,
norm_layer=norm_layer,
**kwargs
)
setattr(self, f'stage{i}', stage)
in_chans = spec['DIM_EMBED'][i]
dim_embed = spec['DIM_EMBED'][-1]
self.norm = norm_layer(dim_embed)
self.cls_token = spec['CLS_TOKEN'][-1]
# Classifier head
self.head = nn.Linear(dim_embed, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.head.weight, std=0.02)
def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location='cpu')
logging.info(f'=> loading pretrained model {pretrained}')
model_dict = self.state_dict()
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
k.split('.')[0] in pretrained_layers
                or pretrained_layers[0] == '*'
)
if need_init:
if verbose:
logging.info(f'=> init {k} from {pretrained}')
if 'pos_embed' in k and v.size() != model_dict[k].size():
size_pretrained = v.size()
size_new = model_dict[k].size()
logging.info(
'=> load_pretrained: resized variant: {} to {}'
.format(size_pretrained, size_new)
)
ntok_new = size_new[1]
ntok_new -= 1
posemb_tok, posemb_grid = v[:, :1], v[0, 1:]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
logging.info(
'=> load_pretrained: grid-size from {} to {}'
.format(gs_old, gs_new)
)
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = scipy.ndimage.zoom(
posemb_grid, zoom, order=1
)
posemb_grid = posemb_grid.reshape(1, gs_new ** 2, -1)
v = torch.tensor(
np.concatenate([posemb_tok, posemb_grid], axis=1)
)
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
@torch.jit.ignore
def no_weight_decay(self):
layers = set()
for i in range(self.num_stages):
layers.add(f'stage{i}.pos_embed')
layers.add(f'stage{i}.cls_token')
return layers
def forward_features(self, x):
for i in range(self.num_stages):
x, cls_tokens = getattr(self, f'stage{i}')(x)
if self.cls_token:
x = self.norm(cls_tokens)
            x = torch.squeeze(x, dim=1)  # squeeze only the token dim, so batch size 1 survives
else:
x = rearrange(x, 'b c h w -> b (h w) c')
x = self.norm(x)
x = torch.mean(x, dim=1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def get_cls_model(config, **kwargs):
msvit_spec = config.MODEL.SPEC
msvit = ConvolutionalVisionTransformer(
in_chans=3,
num_classes=config.MODEL.NUM_CLASSES,
act_layer=QuickGELU,
norm_layer=partial(LayerNorm, eps=1e-5),
init=getattr(msvit_spec, 'INIT', 'trunc_norm'),
spec=msvit_spec
)
if config.MODEL.INIT_WEIGHTS:
msvit.init_weights(
config.MODEL.PRETRAINED,
config.MODEL.PRETRAINED_LAYERS,
config.VERBOSE
)
return msvit
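# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Builds a tiny single-stage model directly from a minimal `spec` dict; the
# hyperparameters below are arbitrary assumptions, not a released CvT config.
if __name__ == '__main__':
    spec = {
        'NUM_STAGES': 1,
        'PATCH_SIZE': [7], 'PATCH_STRIDE': [4], 'PATCH_PADDING': [2],
        'DIM_EMBED': [64], 'DEPTH': [1], 'NUM_HEADS': [1], 'MLP_RATIO': [4.0],
        'QKV_BIAS': [True], 'DROP_RATE': [0.0], 'ATTN_DROP_RATE': [0.0],
        'DROP_PATH_RATE': [0.0], 'CLS_TOKEN': [True],
        'QKV_PROJ_METHOD': ['dw_bn'], 'KERNEL_QKV': [3],
        'PADDING_Q': [1], 'PADDING_KV': [1], 'STRIDE_KV': [2], 'STRIDE_Q': [1],
    }
    model = ConvolutionalVisionTransformer(num_classes=10, spec=spec)
    logits = model(torch.randn(2, 3, 32, 32))
    assert logits.shape == (2, 10)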
| CvT/lib/models/cls_cvt.py/0 | {
"file_path": "CvT/lib/models/cls_cvt.py",
"repo_id": "CvT",
"token_count": 11826
} | 313 |
name: project_environment
channels:
- defaults
dependencies:
- python=3.6.8
- cython=0.29.2
- numpy=1.18.1
- pip:
- azureml-sdk==0.1.0.*
- --index-url https://azuremlsdktestpypi.azureedge.net/dev/aml/office/134157926D8F
- --extra-index-url https://pypi.org/simple
- pandas==0.25.3
- pyarrow==0.16.0
- matplotlib==3.1.0
- git+https://github.com/microsoft/anomalydetector.git@1.1
| anomalydetector/aml_component/conda.yaml/0 | {
"file_path": "anomalydetector/aml_component/conda.yaml",
"repo_id": "anomalydetector",
"token_count": 199
} | 314 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
from collections import OrderedDict
from numbers import Number
from typing import Iterable, Mapping, Sequence
import torch
import torch.nn as nn
def summary(model, input_size):
result, params_info = summary_string(model, input_size)
print(result)
return params_info
def is_scaler(o):
return isinstance(o, Number) or isinstance(o, str) or o is None
def get_tensor_stat(tensor):
assert isinstance(tensor, torch.Tensor)
# some pytorch low-level memory management constant
# the minimal allocate memory size (Byte)
PYTORCH_MIN_ALLOCATE = 2**9
# the minimal cache memory size (Byte)
# PYTORCH_MIN_CACHE = 2**20
numel = tensor.numel()
element_size = tensor.element_size()
fact_numel = tensor.storage().size()
fact_memory_size = fact_numel * element_size
# since pytorch allocate at least 512 Bytes for any tensor, round
# up to a multiple of 512
memory_size = math.ceil(fact_memory_size / PYTORCH_MIN_ALLOCATE) * PYTORCH_MIN_ALLOCATE
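    # e.g. a 10-element float32 tensor holds 40 B of data but is counted here
    # as 512 B after rounding (illustrative example, not from the original code)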
# tensor.storage should be the actual object related to memory
# allocation
# data_ptr = tensor.storage().data_ptr()
size = tuple(tensor.size())
# torch scalar has empty size
if not size:
size = (1,)
return ([size], numel, memory_size)
def get_all_tensor_stats(o):
if is_scaler(o):
return ([[]], 0, 0)
elif isinstance(o, torch.Tensor):
return get_tensor_stat(o)
elif isinstance(o, Mapping):
return get_all_tensor_stats(o.values())
elif isinstance(o, Iterable): # tuple, list, maps
stats = [[]], 0, 0
for oi in o:
tz = get_all_tensor_stats(oi)
stats = tuple(x + y for x, y in zip(stats, tz))
return stats
elif hasattr(o, "__dict__"):
return get_all_tensor_stats(o.__dict__)
else:
return ([[]], 0, 0)
def get_shape(o):
if is_scaler(o):
return str(o)
elif hasattr(o, "shape"):
return f"shape{o.shape}"
elif hasattr(o, "size"):
return f"size{o.size()}"
elif isinstance(o, Sequence):
if len(o) == 0:
return "seq[]"
elif is_scaler(o[0]):
return f"seq[{len(o)}]"
return f"seq{[get_shape(oi) for oi in o]}"
elif isinstance(o, Mapping):
if len(o) == 0:
return "map[]"
        elif is_scaler(next(iter(o))):
            return f"map[{len(o)}]"
        arr = [(get_shape(ki), get_shape(vi)) for ki, vi in o.items()]
return f"map{arr}"
else:
return "N/A"
def summary_string(model, input_size, dtype=torch.float32):
summary_str = ""
# create properties
summary = OrderedDict()
hooks = []
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input"] = get_all_tensor_stats(input)
summary[m_key]["output"] = get_all_tensor_stats(output)
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += torch.prod(torch.LongTensor(list(module.weight.size()))).item()
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += torch.prod(torch.LongTensor(list(module.bias.size()))).item()
summary[m_key]["nb_params"] = params
if not isinstance(module, nn.Sequential) and not isinstance(module, nn.ModuleList):
hooks.append(module.register_forward_hook(hook))
    # create a random input; input_size must include the batch dimension (use >= 2 when the model has batchnorm)
x = torch.rand(input_size, dtype=dtype, device=next(model.parameters()).device)
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(x)
# remove these hooks
for h in hooks:
h.remove()
summary_str += "----------------------------------------------------------------" + "\n"
    line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output (elements, mem)", "Param #")
summary_str += line_new + "\n"
summary_str += "================================================================" + "\n"
total_params = 0
total_input = get_tensor_stat(x)
total_output = [[], 0, 0]
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output"][1:]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output = tuple(x + y for x, y in zip(total_output, summary[layer]["output"]))
if "trainable" in summary[layer]:
if summary[layer]["trainable"] is True:
trainable_params += summary[layer]["nb_params"]
summary_str += line_new + "\n"
total_numel = total_params + total_output[1] + total_input[1]
summary_str += "================================================================" + "\n"
summary_str += "Total params: {0:,}".format(total_params) + "\n"
summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n"
summary_str += "Non-trainable params: {0:,}".format(total_params - trainable_params) + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
summary_str += f"Input Elments: {total_input[1]:.4e}\n"
summary_str += f"Input Mem: {total_input[2]:.4e}\n"
summary_str += f"Layer Output Elements: {total_output[1]:.4e}\n"
summary_str += f"Layer Output Mem: {total_output[2]:.4e}\n"
summary_str += f"Params {total_params:.4e}\n"
summary_str += f"Total Elements {total_numel:.4e}\n"
summary_str += "----------------------------------------------------------------" + "\n"
# return summary
return summary_str, (total_params, trainable_params)
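# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Prints a per-layer report for a small CNN. `input_size` includes the batch
# dimension because it is forwarded to torch.rand unchanged.
if __name__ == '__main__':
    net = nn.Sequential(
        nn.Conv2d(3, 8, 3, padding=1),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(8 * 32 * 32, 10),
    )
    summary(net, (2, 3, 32, 32))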
| archai/archai/common/model_summary.py/0 | {
"file_path": "archai/archai/common/model_summary.py",
"repo_id": "archai",
"token_count": 2585
} | 315 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import CocoCaptions, CocoDetection
from torchvision.transforms import ToTensor
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
class CocoDatasetProvider(DatasetProvider):
"""COCO-based dataset provider."""
SUPPORTED_DATASETS = {
"coco_captions": CocoCaptions,
"coco_detection": CocoDetection,
}
def __init__(
self,
dataset: Optional[str] = "coco_captions",
root: Optional[str] = "dataroot",
) -> None:
"""Initialize COCO-based dataset provider.
Args:
dataset: Name of dataset.
root: Root directory of dataset where is saved.
"""
super().__init__()
assert dataset in self.SUPPORTED_DATASETS, f"`dataset` should be one of: {list(self.SUPPORTED_DATASETS)}"
self.dataset = dataset
self.root = root
@overrides
def get_train_dataset(
self,
ann_file: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return self.SUPPORTED_DATASETS[self.dataset](
self.root,
ann_file,
transform=transform or ToTensor(),
target_transform=target_transform,
)
@overrides
def get_val_dataset(
self,
ann_file: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn(f"Validation set not available for `{self.dataset}`. Returning training set ...")
return self.get_train_dataset(ann_file, transform=transform, target_transform=target_transform)
@overrides
def get_test_dataset(
self,
ann_file: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn(f"Testing set not available for `{self.dataset}`. Returning validation set ...")
return self.get_val_dataset(ann_file, transform=transform, target_transform=target_transform)
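# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Constructing the provider is cheap; materializing a dataset needs COCO images
# under `root` plus a matching annotation file. The paths below are hypothetical.
if __name__ == '__main__':
    provider = CocoDatasetProvider(dataset='coco_detection', root='dataroot/train2017')
    # train_ds = provider.get_train_dataset('dataroot/annotations/instances_train2017.json')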
| archai/archai/datasets/cv/coco_dataset_provider.py/0 | {
"file_path": "archai/archai/datasets/cv/coco_dataset_provider.py",
"repo_id": "archai",
"token_count": 975
} | 316 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import numpy as np
from overrides import overrides
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.predictor import MeanVar, Predictor
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.api.search_results import SearchResults
from archai.discrete_search.api.search_space import (
BayesOptSearchSpace,
EvolutionarySearchSpace,
)
from archai.discrete_search.api.searcher import Searcher
from archai.discrete_search.predictors.dnn_ensemble import PredictiveDNNEnsemble
from archai.discrete_search.utils.multi_objective import get_non_dominated_sorting
logger = OrderedDictLogger(source=__name__)
class MoBananasSearch(Searcher):
"""Multi-objective version of BANANAS algorithm.
It has been proposed in `Bag of Baselines for Multi-objective Joint Neural Architecture
Search and Hyperparameter Optimization`.
Reference:
https://arxiv.org/abs/2105.01015
"""
def __init__(
self,
search_space: BayesOptSearchSpace,
search_objectives: SearchObjectives,
output_dir: str,
surrogate_model: Optional[Predictor] = None,
num_iters: Optional[int] = 10,
init_num_models: Optional[int] = 10,
num_parents: Optional[int] = 10,
mutations_per_parent: Optional[int] = 5,
num_candidates: Optional[int] = 10,
clear_evaluated_models: bool = True,
save_pareto_weights: bool = False,
seed: Optional[int] = 1,
) -> None:
"""Initialize the multi-objective BANANAS.
Args:
search_space: Discrete search space compatible with Bayesian Optimization algorithms.
search_objectives: Search objectives. Expensive objectives (registered with
`compute_intensive=True`) will be estimated using a surrogate model during
certain parts of the search. Cheap objectives will be always evaluated directly.
output_dir: Output directory.
surrogate_model: Surrogate model. If `None`, a `PredictiveDNNEnsemble` will be used.
num_iters: Number of iterations.
init_num_models: Number of initial models to evaluate.
num_parents: Number of parents to select for each iteration.
mutations_per_parent: Number of mutations to apply to each parent.
num_candidates: Number of selected models to add to evaluate in the next iteration.
clear_evaluated_models: Optimizes memory usage by clearing the architecture
of `ArchaiModel` after each iteration. Defaults to True
            save_pareto_weights: If `True`, saves the weights of the Pareto models (currently not implemented).
seed: Random seed.
"""
super(MoBananasSearch, self).__init__()
assert isinstance(search_space, BayesOptSearchSpace)
assert isinstance(search_space, EvolutionarySearchSpace)
if surrogate_model:
assert isinstance(surrogate_model, Predictor)
else:
surrogate_model = PredictiveDNNEnsemble()
self.output_dir = Path(output_dir)
self.output_dir.mkdir(exist_ok=True)
self.search_space = search_space
self.surrogate_model = surrogate_model
# Objectives
self.so = search_objectives
# Algorithm parameters
self.num_iters = num_iters
self.init_num_models = init_num_models
self.num_parents = num_parents
self.mutations_per_parent = mutations_per_parent
self.num_candidates = num_candidates
# Utils
self.clear_evaluated_models = clear_evaluated_models
self.save_pareto_weights = save_pareto_weights
self.seen_archs = set()
self.seed = seed
self.rng = np.random.RandomState(self.seed)
self.surrogate_dataset = []
self.search_state = SearchResults(search_space, search_objectives)
if self.save_pareto_weights:
raise NotImplementedError
def get_surrogate_iter_dataset(self, all_pop: List[ArchaiModel]) -> Tuple[np.ndarray, np.ndarray]:
"""Get the surrogate dataset for the current iteration.
Args:
all_pop: All population models.
Returns:
Tuple of encoded architectures and target values.
"""
encoded_archs = np.vstack([self.search_space.encode(m) for m in all_pop])
target = np.array([self.search_state.all_evaluated_objs[obj] for obj in self.so.expensive_objectives]).T
return encoded_archs, target
def sample_models(self, num_models: int, patience: Optional[int] = 30) -> List[ArchaiModel]:
"""Sample models from the search space.
Args:
num_models: Number of models to sample.
patience: Number of tries to sample a valid model.
Returns:
List of sampled models.
"""
nb_tries, valid_sample = 0, []
while len(valid_sample) < num_models and nb_tries < patience:
sample = [self.search_space.random_sample() for _ in range(num_models)]
_, valid_indices = self.so.validate_constraints(sample)
valid_sample += [sample[i] for i in valid_indices]
return valid_sample[:num_models]
def mutate_parents(
self, parents: List[ArchaiModel], mutations_per_parent: Optional[int] = 1, patience: Optional[int] = 30
) -> List[ArchaiModel]:
"""Mutate parents to generate new models.
Args:
parents: List of parent models.
mutations_per_parent: Number of mutations to apply to each parent.
patience: Number of tries to sample a valid model.
Returns:
List of mutated models.
"""
mutations = {}
for p in parents:
candidates = {}
nb_tries = 0
while len(candidates) < mutations_per_parent and nb_tries < patience:
mutated_model = self.search_space.mutate(p)
mutated_model.metadata["parent"] = p.archid
if not self.so.is_model_valid(mutated_model):
continue
if mutated_model.archid not in self.seen_archs:
candidates[mutated_model.archid] = mutated_model
nb_tries += 1
mutations.update(candidates)
if len(mutations) == 0:
logger.warn(f"No mutations found after {patience} tries for each one of the {len(parents)} parents.")
return list(mutations.values())
def predict_expensive_objectives(self, archs: List[ArchaiModel]) -> Dict[str, MeanVar]:
"""Predict expensive objectives for `archs` using surrogate model.
Args:
archs: List of architectures.
Returns:
Dictionary of predicted expensive objectives.
"""
encoded_archs = np.vstack([self.search_space.encode(m) for m in archs])
pred_results = self.surrogate_model.predict(encoded_archs)
return {
obj_name: MeanVar(pred_results.mean[:, i], pred_results.var[:, i])
for i, obj_name in enumerate(self.so.expensive_objectives)
}
def thompson_sampling(
self,
archs: List[ArchaiModel],
sample_size: int,
pred_expensive_objs: Dict[str, MeanVar],
cheap_objs: Dict[str, np.ndarray],
) -> List[int]:
"""Get the selected architecture list indices from Thompson Sampling.
Args:
archs: List of architectures.
sample_size: Number of architectures to select.
pred_expensive_objs: Predicted expensive objectives.
cheap_objs: Cheap objectives.
Returns:
List of selected architecture indices.
"""
simulation_results = cheap_objs
# Simulates results from surrogate model assuming N(pred_mean, pred_std)
simulation_results.update(
{
obj_name: self.rng.randn(len(archs)) * np.sqrt(pred.var) + pred.mean
for obj_name, pred in pred_expensive_objs.items()
}
)
# Performs non-dominated sorting
nds_frontiers = get_non_dominated_sorting(archs, simulation_results, self.so)
# Shuffle elements inside each frontier to avoid giving advantage to a specific
# part of the nds frontiers
for frontier in nds_frontiers:
self.rng.shuffle(frontier["indices"])
return [idx for frontier in nds_frontiers for idx in frontier["indices"]][:sample_size]
@overrides
def search(self) -> SearchResults:
all_pop, selected_indices, pred_expensive_objs = [], [], {}
unseen_pop = self.sample_models(self.init_num_models)
for i in range(self.num_iters):
self.on_start_iteration(i + 1)
logger.info(f"Iteration {i+1}/{self.num_iters}")
all_pop.extend(unseen_pop)
logger.info(f"Evaluating objectives for {len(unseen_pop)} architectures ...")
iter_results = self.so.eval_all_objs(unseen_pop)
self.seen_archs.update([m.archid for m in unseen_pop])
# Adds iteration results and predictions from the previous iteration for comparison
extra_model_data = {
f"Predicted {obj_name} {c}": getattr(obj_results, c)[selected_indices]
for obj_name, obj_results in pred_expensive_objs.items()
for c in ["mean", "var"]
}
self.search_state.add_iteration_results(unseen_pop, iter_results, extra_model_data)
# Clears models from memory if needed
if self.clear_evaluated_models:
for m in unseen_pop:
m.clear()
# Updates surrogate
logger.info("Updating surrogate model ...")
X, y = self.get_surrogate_iter_dataset(all_pop)
self.surrogate_model.fit(X, y)
# Selects top-`num_parents` models from non-dominated sorted results
nds_frontiers = get_non_dominated_sorting(all_pop, self.search_state.all_evaluated_objs, self.so)
parents = [model for frontier in nds_frontiers for model in frontier["models"]]
parents = parents[: self.num_parents]
# Mutates top models
logger.info(f"Generating mutations for {len(parents)} parent architectures ...")
mutated = self.mutate_parents(parents, self.mutations_per_parent)
logger.info(f"Found {len(mutated)} new architectures satisfying constraints.")
if not mutated:
logger.info("No new architectures found. Stopping search ...")
break
# Predicts expensive objectives using surrogate model
# and calculates cheap objectives for mutated architectures
logger.info(f"Predicting {self.so.expensive_objective_names} for new architectures using surrogate model ...")
pred_expensive_objs = self.predict_expensive_objectives(mutated)
logger.info(f"Calculating cheap objectives {self.so.cheap_objective_names} for new architectures ...")
cheap_objs = self.so.eval_cheap_objs(mutated)
            # Selects `num_candidates` architectures for the next iteration using Thompson sampling
selected_indices = self.thompson_sampling(mutated, self.num_candidates, pred_expensive_objs, cheap_objs)
unseen_pop = [mutated[i] for i in selected_indices]
logger.info(f"Best {self.num_candidates} candidate architectures were selected for the next iteration.")
# Save plots and reports
self.search_state.save_all_2d_pareto_evolution_plots(self.output_dir)
self.search_state.save_search_state(self.output_dir / f"search_state_{i}.csv")
return self.search_state
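# --- Hedged wiring sketch (illustrative comments; not part of the original file) ---
# A search space used with MoBananasSearch must implement both
# `EvolutionarySearchSpace` (for `mutate`) and `BayesOptSearchSpace`
# (for `encode`). `MySearchSpace` and `my_objectives` are hypothetical.
# >>> searcher = MoBananasSearch(MySearchSpace(), my_objectives, 'out/',
# ...                            num_iters=5, init_num_models=8)
# >>> results = searcher.search()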
| archai/archai/discrete_search/algos/bananas.py/0 | {
"file_path": "archai/archai/discrete_search/algos/bananas.py",
"repo_id": "archai",
"token_count": 5026
} | 317 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
from typing import Any, Dict, Optional
import nats_bench
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import ModelEvaluator
from archai.discrete_search.search_spaces.benchmark.natsbench_tss import (
NatsbenchTssSearchSpace
)
class NatsbenchMetric(ModelEvaluator):
"""Evaluate a model using a metric from the NATS-Bench API."""
def __init__(
self,
search_space: NatsbenchTssSearchSpace,
metric_name: str,
epochs: Optional[int] = None,
raise_not_found: Optional[bool] = True,
more_info_kwargs: Optional[Dict[str, Any]] = None,
cost_info_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the evaluator.
Args:
search_space: Search space to use.
metric_name: Metric to use. See `nats_bench.api.NatsBenchAPI.get_more_info` for available metrics.
epochs: Number of epochs to use. If None, uses the default number of epochs.
raise_not_found: If True, raises an error if the architecture does not belong to the search space.
more_info_kwargs: Additional arguments to pass to `nats_bench.api.NatsBenchAPI.get_more_info`.
cost_info_kwargs: Additional arguments to pass to `nats_bench.api.NatsBenchAPI.get_cost_info`.
"""
assert isinstance(
search_space, NatsbenchTssSearchSpace
), "This objective function only works with architectures from NatsbenchTssSearchSpace"
self.search_space = search_space
self.metric_name = metric_name
self.epochs = epochs
self.archid_pattern = re.compile("natsbench-tss-([0-9]+)")
self.api = nats_bench.create(str(self.search_space.natsbench_location), "tss", fast_mode=True, verbose=False)
self.raise_not_found = raise_not_found
self.more_info_kwargs = more_info_kwargs or dict()
self.cost_info_kwargs = cost_info_kwargs or dict()
self.total_time_spent = 0
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> Optional[float]:
natsbench_id = self.archid_pattern.match(model.archid)
budget = int(budget) if budget else budget
if not natsbench_id:
if self.raise_not_found:
raise ValueError(
f"Architecture {model.archid} does not belong to the NatsBench search space. "
"Please refer to `archai.search_spaces.discrete.NatsbenchSearchSpace` to "
"use the Natsbench search space."
)
return None
info = self.api.get_more_info(
int(natsbench_id.group(1)),
dataset=self.search_space.base_dataset,
iepoch=budget or self.epochs,
**self.more_info_kwargs,
)
cost_info = self.api.get_cost_info(
int(natsbench_id.group(1)), dataset=self.search_space.base_dataset, **self.cost_info_kwargs
)
if self.metric_name in info:
result = info[self.metric_name]
self.total_time_spent += info["train-all-time"] + info["test-all-time"]
elif self.metric_name in cost_info:
            result = cost_info[self.metric_name]
else:
raise KeyError(f"`metric_name` {self.metric_name} not found. Available metrics = {str(list(info.keys()))}")
return result
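# --- Hedged usage sketch (illustrative comments; not part of the original file) ---
# Evaluation requires the NATS-Bench files on disk; constructor arguments of
# NatsbenchTssSearchSpace are omitted here because they are not shown above.
# >>> space = NatsbenchTssSearchSpace(...)  # hypothetical setup
# >>> metric = NatsbenchMetric(space, 'test-accuracy', epochs=12)
# >>> metric.evaluate(model)  # `model` must come from `space`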
| archai/archai/discrete_search/evaluators/benchmark/natsbench_tss.py/0 | {
"file_path": "archai/archai/discrete_search/evaluators/benchmark/natsbench_tss.py",
"repo_id": "archai",
"token_count": 1543
} | 318 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as f
from overrides import overrides
from tqdm import tqdm
from archai.discrete_search.api.predictor import MeanVar, Predictor
class PredictiveDNNEnsemble(Predictor):
"""Deep Neural Network Ensemble predictor.
Predicts the outcome of a set of expensive objectives using an ensemble of MLP models.
"""
def __init__(
self,
num_ensemble_members: Optional[int] = 5,
num_layers: Optional[int] = 5,
width: Optional[int] = 64,
lr: Optional[float] = 1e-4,
num_tr_steps: Optional[int] = 2_000,
replace_nan_value: float = -1.0,
device: Optional[str] = "cuda",
) -> None:
"""Initialize the predictor.
Args:
num_ensemble_members: Number of ensemble members.
num_layers: Number of layers of each member.
width: Number of neurons in each hidden layer.
            lr: Learning rate of each ensemble member.
num_tr_steps: Number of training steps of each member.
            replace_nan_value: Value used to replace NaNs (often used to represent unused
                architecture parameters). Defaults to -1.0.
device: Device to use for training.
"""
self.num_ensemble_members = num_ensemble_members
self.num_layers = num_layers
self.width = width
self.lr = lr
self.num_tr_steps = num_tr_steps
self.replace_nan_value = replace_nan_value
self.is_fit = False
self.device = device
        self.X_meansd = None
        self.y_meansd = None
def to_cuda(self) -> None:
"""Moves the predictor to CUDA."""
for m in self.ensemble:
m.cuda()
self.device = "cuda"
def to_cpu(self) -> None:
"""Moves the predictor to CPU."""
for m in self.ensemble:
m.cpu()
self.device = "cpu"
@overrides
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
assert len(X.shape) == 2
assert len(y.shape) == 2
_, num_features = X.shape
_, num_objectives = y.shape
X = np.nan_to_num(X, nan=self.replace_nan_value)
self.X_meansd = np.mean(X, axis=0), np.std(X, axis=0)
self.y_meansd = np.mean(y, axis=0), np.std(y, axis=0)
# Initialize ensemble models
self.ensemble = [
FFEnsembleMember(num_objectives, num_features, self.num_layers, self.width).to(self.device)
for _ in range(self.num_ensemble_members)
]
# Normalizes features and targets
X = (X.copy() - self.X_meansd[0]) / (self.X_meansd[1] + 1e-7)
y = (y.copy() - self.y_meansd[0]) / (self.y_meansd[1] + 1e-7)
Xt = torch.tensor(X, dtype=torch.float32).to(self.device)
yt = torch.tensor(y, dtype=torch.float32).to(self.device)
# TODO: should we be splitting data into
# train and val?
for member in tqdm(self.ensemble, desc="Training DNN Ensemble..."):
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.Adam(member.parameters(), lr=self.lr)
member.train()
for t in range(self.num_tr_steps):
y_pred = member(Xt)
loss = criterion(y_pred.squeeze(), yt.squeeze())
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.is_fit = True
@overrides
def predict(self, X: np.ndarray) -> MeanVar:
assert len(X.shape) == 2
assert self.is_fit, "PredictiveDNNEnsemble: predict called before fit!"
X = (X.copy() - self.X_meansd[0]) / (self.X_meansd[1] + 1e-7)
Xt = torch.tensor(X, dtype=torch.float32).to(self.device)
preds = []
with torch.no_grad():
for member in self.ensemble:
member.eval()
pred = member(Xt).to("cpu").numpy()
preds.append(pred * (self.y_meansd[1] + 1e-7) + self.y_meansd[0])
preds = np.array(preds)
return MeanVar(mean=np.mean(preds, axis=0), var=np.var(preds, axis=0))
class FFEnsembleMember(nn.Module):
"""Feedforward ensemble member."""
def __init__(
self,
num_objectives: Optional[int] = 1,
input_feat_len: Optional[int] = 128,
num_layers: Optional[int] = 10,
width: Optional[int] = 20,
) -> None:
"""Initialize the ensemble member.
Args:
num_objectives: Number of objectives.
input_feat_len: Length of input features.
num_layers: Number of layers.
width: Width of each layer.
"""
super(FFEnsembleMember, self).__init__()
self.input_feat_len = input_feat_len
self.num_layers = num_layers
self.width = width
self.linears = nn.ModuleList([nn.Linear(self.input_feat_len, width)])
self.linears.extend([nn.Linear(width, width) for i in range(1, self.num_layers - 1)])
self.output = nn.Linear(width, num_objectives)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for layer in self.linears:
x = f.relu(layer(x))
return self.output(x)
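# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Fits the ensemble on random features/targets (CPU, few steps for speed) and
# checks the MeanVar output shapes; all sizes here are arbitrary assumptions.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X, y = rng.rand(32, 8), rng.rand(32, 2)
    predictor = PredictiveDNNEnsemble(num_ensemble_members=2, num_layers=2,
                                      width=16, num_tr_steps=10, device='cpu')
    predictor.fit(X, y)
    pred = predictor.predict(rng.rand(4, 8))
    assert pred.mean.shape == (4, 2) and pred.var.shape == (4, 2)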
| archai/archai/discrete_search/predictors/dnn_ensemble.py/0 | {
"file_path": "archai/archai/discrete_search/predictors/dnn_ensemble.py",
"repo_id": "archai",
"token_count": 2505
} | 319 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import TransformerFlexSearchSpace
from archai.discrete_search.search_spaces.nlp.tfpp import TfppSearchSpace
| archai/archai/discrete_search/search_spaces/nlp/__init__.py/0 | {
"file_path": "archai/archai/discrete_search/search_spaces/nlp/__init__.py",
"repo_id": "archai",
"token_count": 77
} | 320 |
'''Adapted from https://github.com/lucidrains/local-attention.'''
import math
from typing import Optional
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat, pack, unpack
from archai.discrete_search.search_spaces.config import ArchConfig
TOKEN_SELF_ATTN_VALUE = -5e4
class SinusoidalEmbeddings(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x):
n = x.shape[-2]
t = torch.arange(n, device = x.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, 'b ... (r d) -> b (...) r d', r = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(q, k, freqs):
q, k = map(lambda t: (t * freqs.cos()) + (rotate_half(t) * freqs.sin()), (q, k))
return q, k
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def pad_to_multiple(tensor, multiple, dim=-1, value=0):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return False, tensor
remainder = math.ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return True, F.pad(tensor, (*pad_offset, 0, remainder), value = value)
def look_around(x, backward = 1, forward = 0, pad_value = -1, dim = 2):
t = x.shape[1]
dims = (len(x.shape) - dim) * (0, 0)
padded_x = F.pad(x, (*dims, backward, forward), value = pad_value)
tensors = [padded_x[:, ind:(ind + t), ...] for ind in range(forward + backward + 1)]
return torch.cat(tensors, dim = dim)
class LocalAttention(nn.Module):
def __init__(
self,
window_size,
causal = False,
look_backward = 1,
look_forward = None,
dropout = 0.,
autopad = False,
exact_windowsize = False,
pad_value: int = -1,
rel_pos_emb_dim: Optional[int] = None,
**kwargs
):
super().__init__()
look_forward = look_forward or (0 if causal else 1)
assert not (causal and look_forward > 0)
self.window_size = window_size
self.autopad = autopad
self.exact_windowsize = exact_windowsize
self.causal = causal
self.look_backward = look_backward
self.look_forward = look_forward
self.pad_value = pad_value
self.dropout = nn.Dropout(dropout)
self.rel_pos = None
if rel_pos_emb_dim is not None: # backwards compatible with old `rel_pos_emb_config` deprecated argument
self.rel_pos = SinusoidalEmbeddings(rel_pos_emb_dim)
def forward(self, q, k, v, bin_attention_mask: Optional[torch.FloatTensor] = None):
# https://github.com/arogozhnikov/einops/blob/master/docs/4-pack-and-unpack.ipynb
(q, packed_shape), (k, _), (v, _) = map(lambda t: pack([t], '* n d'), (q, k, v))
if self.rel_pos is not None:
pos_emb = self.rel_pos(q)
q, k = apply_rotary_pos_emb(q, k, pos_emb)
# auto padding
if self.autopad:
orig_seq_len = q.shape[1]
(needed_pad, q), (_, k), (_, v) = map(lambda t: pad_to_multiple(t, self.window_size, dim = -2), (q, k, v))
b, n, dim_head, device, dtype = *q.shape, q.device, q.dtype
scale = dim_head ** -0.5
assert (n % self.window_size) == 0, f'sequence length {n} must be divisible by window size {self.window_size} for local attention'
windows = n // self.window_size
seq = torch.arange(n, device = device)
b_t = rearrange(seq, '(w n) -> 1 w n', w = windows, n = self.window_size)
bq, bk, bv = map(lambda t: rearrange(t, 'b (w n) d -> b w n d', w = windows), (q, k, v))
look_around_kwargs = dict(
backward = self.look_backward,
forward = self.look_forward,
pad_value = self.pad_value
)
bk = look_around(bk, **look_around_kwargs)
bv = look_around(bv, **look_around_kwargs)
bq_t = b_t
bq_k = look_around(b_t, **look_around_kwargs)
bq_t = rearrange(bq_t, '... i -> ... i 1')
bq_k = rearrange(bq_k, '... j -> ... 1 j')
sim = einsum('b h i e, b h j e -> b h i j', bq, bk) * scale
mask_value = max_neg_value(sim)
if self.causal:
causal_mask = bq_t < bq_k
if self.exact_windowsize:
max_causal_window_size = (self.window_size * self.look_backward)
causal_mask = causal_mask | (bq_t > (bq_k + max_causal_window_size))
sim = sim.masked_fill(causal_mask, mask_value)
del causal_mask
# mask out padding value
if self.autopad and needed_pad:
pad_mask = bq_k == self.pad_value
sim = sim.masked_fill(pad_mask, mask_value)
del pad_mask
if bin_attention_mask is not None:
mask = bin_attention_mask.bool()
            batch = bin_attention_mask.shape[0]
            assert (b % batch) == 0
            h = b // batch
if self.autopad:
_, mask = pad_to_multiple(mask, self.window_size, dim=-1, value=False)
mask = rearrange(mask, '... (w n) -> (...) w n', w = windows, n = self.window_size)
mask = look_around(mask, **{**look_around_kwargs, 'pad_value': False})
mask = rearrange(mask, '... j -> ... 1 j')
mask = repeat(mask, 'b ... -> (b h) ...', h = h)
sim = sim.masked_fill(~mask, mask_value)
del mask
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregation
out = einsum('b h i j, b h j e -> b h i e', attn, bv)
out = rearrange(out, 'b w n d -> b (w n) d')
if self.autopad:
out = out[:, :orig_seq_len, :]
out, *_ = unpack(out, packed_shape, '* n d')
return out
class LocalMHA(nn.Module):
def __init__(
self,
arch_config: ArchConfig,
hidden_size: int,
total_heads: int,
op_heads: int,
att_dropout = 0.,
prenorm = False,
use_rotary: bool = True,
**kwargs
):
super().__init__()
assert hidden_size % total_heads == 0, 'hidden size must be divisible by total heads'
self.hidden_size = hidden_size
self.total_heads = total_heads
self.op_heads = op_heads
head_size = self.hidden_size // self.total_heads
self.op_size = head_size * self.op_heads
self.norm = nn.LayerNorm(hidden_size) if prenorm else None
self.to_qkv = nn.Linear(hidden_size, self.op_size * 3, bias = False)
self.attn_fn = LocalAttention(
window_size = arch_config.pick('window_size'),
causal = True,
autopad = True,
exact_windowsize = True,
dropout=att_dropout,
rel_pos_emb_dim=(head_size if use_rotary else None),
**kwargs
)
def forward(self, hidden_states, bin_attention_mask: Optional[torch.LongTensor] = None, **kwargs):
if self.norm is not None:
hidden_states = self.norm(hidden_states)
q, k, v = self.to_qkv(hidden_states).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.op_heads), (q, k, v))
out = self.attn_fn(q, k, v, bin_attention_mask=bin_attention_mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return out, None
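# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Runs causal local attention on random q/k/v tensors; batch, head count and
# sizes are arbitrary assumptions. seq_len=16 is an exact multiple of the window.
if __name__ == '__main__':
    attn = LocalAttention(window_size=4, causal=True, autopad=True,
                          exact_windowsize=True, rel_pos_emb_dim=8)
    q = torch.randn(2, 2, 16, 8)  # (batch, heads, seq_len, head_dim)
    out = attn(q, q, q)
    assert out.shape == (2, 2, 16, 8)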
| archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/local_attention.py/0 | {
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/local_attention.py",
"repo_id": "archai",
"token_count": 3729
} | 321 |
# Downloaded from https://github.com/HazyResearch/state-spaces/blob/06dbbdfd0876501a7f12bf3262121badbc7658af/src/models/functional/toeplitz.py
""" Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] *= f
return K
def triangular_toeplitz_multiply_(u, v, sum=None):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = u_f * v_f
if sum is not None:
uv_f = uv_f.sum(dim=sum)
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
def triangular_toeplitz_multiply_padded_(u, v):
""" Same as triangular_toeplitz_multiply but inputs and output assume to be 0-padded already. """
n = u.shape[-1]
assert n % 2 == 0
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n:] = 0
return output
class TriangularToeplitzMult(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
return triangular_toeplitz_multiply_(u, v)
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultFast(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad.flip(-1), (0, n))
g_f = torch.fft.rfft(g_expand, n=2*n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=2*n, dim=-1)[..., :n]
d_v = torch.fft.irfft(gu_f, n=2*n, dim=-1)[..., :n]
d_u = d_u.flip(-1)
d_v = d_v.flip(-1)
return d_u, d_v
class TriangularToeplitzMultPadded(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
output = triangular_toeplitz_multiply_(u, v)
return output
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_padded_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_padded_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultPaddedFast(torch.autograd.Function):
""" Trade off speed (20-25% faster) for more memory (20-25%) """
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n//2:].zero_()
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad[..., :n//2].flip(-1), (0, n//2))
g_f = torch.fft.rfft(g_expand, n=n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=n, dim=-1)
d_v = torch.fft.irfft(gu_f, n=n, dim=-1)
d_u[..., n//2:].zero_()
d_v[..., n//2:].zero_()
d_u[..., :n//2] = d_u[..., :n//2].flip(-1) # TODO
d_v[..., :n//2] = d_v[..., :n//2].flip(-1) # TODO
return d_u, d_v
# triangular_toeplitz_multiply = triangular_toeplitz_multiply_
triangular_toeplitz_multiply = TriangularToeplitzMult.apply
triangular_toeplitz_multiply_fast = TriangularToeplitzMultFast.apply
triangular_toeplitz_multiply_padded = TriangularToeplitzMultPadded.apply
triangular_toeplitz_multiply_padded_fast = TriangularToeplitzMultPaddedFast.apply
def causal_convolution(u, v, fast=True, pad=False):
if not pad and not fast:
return triangular_toeplitz_multiply(u, v)
if not pad and fast:
return triangular_toeplitz_multiply_fast(u, v)
if pad and not fast:
return triangular_toeplitz_multiply_padded(u, v)
if pad and fast:
return triangular_toeplitz_multiply_padded_fast(u, v)
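# --- Hedged correctness sketch (illustrative; not part of the original file) ---
# Checks that the FFT-based causal convolution matches an explicit
# lower-triangular Toeplitz matrix product on a random pair of vectors.
if __name__ == '__main__':
    u, v = torch.randn(8), torch.randn(8)
    ref = construct_toeplitz(u) @ v                       # view 2: Toeplitz matmul
    out = causal_convolution(u, v, fast=True, pad=False)  # view 1: causal convolution
    assert torch.allclose(ref, out, atol=1e-5)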
| archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ssm_ops/toeplitz.py/0 | {
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ssm_ops/toeplitz.py",
"repo_id": "archai",
"token_count": 2744
} | 322 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import torch
import torch.nn as nn
from transformers.activations import ACT2FN
from transformers.models.gpt2.modeling_gpt2 import (
GPT2MLP,
GPT2Attention,
GPT2Block,
GPT2LMHeadModel,
GPT2Model,
GPT2PreTrainedModel,
)
from transformers.pytorch_utils import Conv1D
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.configuration_gpt2_flex import (
GPT2FlexConfig,
)
class GPT2FlexAttention(GPT2Attention):
def __init__(
self,
config: GPT2FlexConfig,
is_cross_attention: Optional[bool] = False,
layer_idx: Optional[int] = None,
) -> None:
nn.Module.__init__(self)
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads[layer_idx]
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
class GPT2FlexMLP(GPT2MLP):
def __init__(self, intermediate_size: int, config: GPT2FlexConfig) -> None:
nn.Module.__init__(self)
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
self.primer_square = config.primer_square
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
if self.primer_square:
hidden_states = hidden_states**2
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class GPT2FlexBlock(GPT2Block):
def __init__(self, config: GPT2FlexConfig, layer_idx: Optional[int] = None) -> None:
nn.Module.__init__(self)
hidden_size = config.hidden_size
inner_dim = config.n_inner[layer_idx]
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPT2FlexAttention(config, layer_idx=layer_idx)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = GPT2FlexAttention(config, is_cross_attention=True, layer_idx=layer_idx)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = GPT2FlexMLP(inner_dim, config)
class GPT2FlexModel(GPT2Model):
config_class = GPT2FlexConfig
def __init__(self, config: GPT2FlexConfig) -> None:
GPT2PreTrainedModel.__init__(self, config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPT2FlexBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
self.post_init()
class GPT2FlexLMHeadModel(GPT2LMHeadModel):
config_class = GPT2FlexConfig
def __init__(self, config: GPT2FlexConfig) -> None:
GPT2PreTrainedModel.__init__(self, config)
self.transformer = GPT2FlexModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.model_parallel = False
self.device_map = None
self.post_init()
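# --- Illustrative sketch (not part of the original module) ---
# Minimal instantiation; the per-layer list field names are assumptions based
# on how the config is indexed above (num_attention_heads[layer_idx],
# n_inner[layer_idx]) and may differ from the real GPT2FlexConfig constructor.
if __name__ == "__main__":
    config = GPT2FlexConfig(
        n_layer=2,
        n_embd=64,
        n_head=[2, 4],        # assumed: heterogeneous heads per layer
        n_inner=[128, 256],   # assumed: heterogeneous MLP widths per layer
        primer_square=True,   # squared-ReLU activation from Primer
    )
    model = GPT2FlexLMHeadModel(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    print(model(input_ids).logits.shape)  # expected: (1, 16, vocab_size)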
| archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/modeling_gpt2_flex.py/0 | {
"file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/modeling_gpt2_flex.py",
"repo_id": "archai",
"token_count": 2327
} | 323 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
class AttentionMaskFormat:
"""Enumerate the attention mask shape."""
MaskIndexEnd = 0
MaskIndexEndAndStart = 1
AttentionMask = 2
NoMask = 3
class FusionOptions:
"""Options to control the fusion of operators in the ONNX graph."""
def __init__(self, model_type: str) -> None:
"""Initialize the fusion options.
Args:
model_type: Type of model.
"""
self.enable_shape_inference = True
self.enable_qordered_matmul = True
self.enable_gelu = True
self.enable_bias_gelu = True
self.enable_gelu_approximation = False
self.enable_gemm_fast_gelu = False
self.enable_layer_norm = True
self.enable_embed_layer_norm = True
self.enable_skip_layer_norm = True
self.enable_bias_skip_layer_norm = True
if model_type in ["gpt2", "gpt2-flex"]:
self.enable_embed_layer_norm = False
self.enable_skip_layer_norm = False
self.enable_attention = True
self.use_multi_head_attention = False
self.attention_mask_format = AttentionMaskFormat.AttentionMask
def use_raw_attention_mask(self, use_raw_mask: Optional[bool] = True) -> None:
"""Enable the usage of raw attention mask.
Args:
use_raw_mask: Whether raw mask should be used or not.
"""
if use_raw_mask:
self.attention_mask_format = AttentionMaskFormat.AttentionMask
else:
self.attention_mask_format = AttentionMaskFormat.MaskIndexEnd
def disable_attention_mask(self) -> None:
"""Disable the usage of attention mask."""
self.attention_mask_format = AttentionMaskFormat.NoMask
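# --- Illustrative sketch (not part of the original module) ---
# Minimal usage: build options for a GPT-2 graph and toggle the mask format.
if __name__ == "__main__":
    options = FusionOptions("gpt2")
    options.use_raw_attention_mask(False)   # -> MaskIndexEnd
    options.disable_attention_mask()        # -> NoMask
    print(options.attention_mask_format)    # 3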
| archai/archai/onnx/optimization_utils/fusion_options.py/0 | {
"file_path": "archai/archai/onnx/optimization_utils/fusion_options.py",
"repo_id": "archai",
"token_count": 738
} | 324 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import Iterator
import torch
from torch import Tensor, autograd, nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
from archai.common import ml_utils
from archai.common.config import Config
from archai.common.utils import zip_eq
from archai.supergraph.nas.model import Model
def _get_loss(model:Model, lossfn, x, y):
logits, *_ = model(x) # might also return aux tower logits
return lossfn(logits, y)
def _get_alphas(model:Model)->Iterator[nn.Parameter]:
return model.all_owned().param_by_kind('alphas')
class BilevelOptimizer:
def __init__(self, conf_alpha_optim:Config, w_momentum: float, w_decay: float,
model: Model, lossfn: _Loss, device, batch_chunks:int) -> None:
self._w_momentum = w_momentum # momentum for w
self._w_weight_decay = w_decay # weight decay for w
self._lossfn = lossfn
self._model = model # main model with respect to w and alpha
self.batch_chunks = batch_chunks
self.device = device
# create a copy of model which we will use
# to compute grads for alphas without disturbing
# original weights
self._vmodel = copy.deepcopy(model).to(device)
self._alphas = list(_get_alphas(self._model))
self._valphas = list(_get_alphas(self._vmodel))
# this is the optimizer to optimize alphas parameter
self._alpha_optim = ml_utils.create_optimizer(conf_alpha_optim, self._alphas)
def state_dict(self)->dict:
return {
'alpha_optim': self._alpha_optim.state_dict(),
'vmodel': self._vmodel.state_dict()
}
def load_state_dict(self, state_dict)->None:
self._vmodel.load_state_dict(state_dict['vmodel'])
self._alpha_optim.load_state_dict(state_dict['alpha_optim'])
    # NOTE: The original DARTS paper uses all parameters, which includes op weights
    # as well as stems and alphas; in theory it should only be using
    # op weights. Below you can conduct the experiment by replacing parameters()
    # with weights(), but that tanks accuracy below 97.0 for cifar10
def _model_params(self):
return self._model.parameters()
#return self._model.nonarch_params(recurse=True)
def _vmodel_params(self):
return self._vmodel.parameters()
#return self._vmodel.nonarch_params(recurse=True)
def _update_vmodel(self, x, y, lr: float, w_optim: Optimizer) -> None:
""" Update vmodel with w' (main model has w) """
# TODO: should this loss be stored for later use?
loss = _get_loss(self._model, self._lossfn, x, y)
gradients = autograd.grad(loss, self._model_params())
"""update weights in vmodel so we leave main model undisturbed
The main technical difficulty computing w' without affecting alphas is
that you can't simply do backward() and step() on loss because loss
tracks alphas as well as w. So, we compute gradients using autograd and
do manual sgd update."""
# TODO: other alternative may be to (1) copy model
# (2) set require_grads = False on alphas
# (3) loss and step on vmodel (4) set back require_grads = True
with torch.no_grad(): # no need to track gradient for these operations
for w, vw, g in zip(
self._model_params(), self._vmodel_params(), gradients):
# simulate momentum update on model but put this update in vmodel
m = w_optim.state[w].get(
'momentum_buffer', 0.)*self._w_momentum
vw.copy_(w - lr * (m + g + self._w_weight_decay*w))
# synchronize alphas
for a, va in zip_eq(self._alphas, self._valphas):
va.copy_(a)
def step(self, x_train: Tensor, y_train: Tensor, x_valid: Tensor, y_valid: Tensor,
w_optim: Optimizer) -> None:
        # TODO: unlike the DARTS paper, we get lr from the optimizer instead of the scheduler
lr = w_optim.param_groups[0]['lr']
self._alpha_optim.zero_grad()
        # divide batch into chunks if needed so it fits in GPU RAM
        if self.batch_chunks > 1:
            xt_chunks, yt_chunks = torch.chunk(x_train, self.batch_chunks), torch.chunk(y_train, self.batch_chunks)
            xv_chunks, yv_chunks = torch.chunk(x_valid, self.batch_chunks), torch.chunk(y_valid, self.batch_chunks)
        else:
            xt_chunks, yt_chunks = (x_train,), (y_train,)
            xv_chunks, yv_chunks = (x_valid,), (y_valid,)
        for xtc, ytc, xvc, yvc in zip(xt_chunks, yt_chunks, xv_chunks, yv_chunks):
xtc, ytc = xtc.to(self.device), ytc.to(self.device, non_blocking=True)
xvc, yvc = xvc.to(self.device), yvc.to(self.device, non_blocking=True)
# compute the gradient and write it into tensor.grad
# instead of generated by loss.backward()
self._backward_bilevel(xtc, ytc, xvc, yvc,lr, w_optim)
# at this point we should have model with updated gradients for w and alpha
self._alpha_optim.step()
def _backward_bilevel(self, x_train, y_train, x_valid, y_valid, lr, w_optim):
""" Compute unrolled loss and backward its gradients """
# update vmodel with w', but leave alphas as-is
# w' = w - lr * grad
self._update_vmodel(x_train, y_train, lr, w_optim)
# compute loss on validation set for model with w'
# wrt alphas. The autograd.grad is used instead of backward()
# to avoid having to loop through params
vloss = _get_loss(self._vmodel, self._lossfn, x_valid, y_valid)
v_alphas = tuple(self._valphas)
v_weights = tuple(self._vmodel_params())
        # TODO: if v_weights = all params then below does double counting of alphas
v_grads = autograd.grad(vloss, v_alphas + v_weights)
# grad(L(w', a), a), part of Eq. 6
dalpha = v_grads[:len(v_alphas)]
        # get grads for the w' params, which we will use to compute w+ and w-
dw = v_grads[len(v_alphas):]
hessian = self._hessian_vector_product(dw, x_train, y_train)
        # the dalpha we have is from the unrolled model, so we need to
        # transfer those grads back to our main model
# update final gradient = dalpha - xi*hessian
# TODO: currently alphas lr is same as w lr
with torch.no_grad():
for alpha, da, h in zip(self._alphas, dalpha, hessian):
alpha.grad = da - lr*h
# now that model has both w and alpha grads,
# we can run w_optim.step() to update the param values
def _hessian_vector_product(self, dw, x, y, epsilon_unit=1e-2):
"""
Implements equation 8
dw = dw` {L_val(w`, alpha)}
w+ = w + eps * dw
w- = w - eps * dw
hessian = (dalpha {L_trn(w+, alpha)} -dalpha {L_trn(w-, alpha)})/(2*eps)
eps = 0.01 / ||dw||
"""
"""scale epsilon with grad magnitude. The dw
is a multiplier on RHS of eq 8. So this scalling is essential
in making sure that finite differences approximation is not way off
Below, we flatten each w, concate all and then take norm"""
# TODO: is cat along dim 0 correct?
dw_norm = torch.cat([w.view(-1) for w in dw]).norm()
epsilon = epsilon_unit / dw_norm
# w+ = w + epsilon * grad(w')
with torch.no_grad():
for p, v in zip(self._model_params(), dw):
p += epsilon * v
# Now that we have model with w+, we need to compute grads wrt alphas
# This loss needs to be on train set, not validation set
loss = _get_loss(self._model, self._lossfn, x, y)
dalpha_plus = autograd.grad(
loss, self._alphas) # dalpha{L_trn(w+)}
# get model with w- and then compute grads wrt alphas
        # w- = w - eps * dw'
with torch.no_grad():
for p, v in zip(self._model_params(), dw):
                # we had already added eps*dw above, so subtracting 2*eps*dw gives w-
p -= 2. * epsilon * v
# similarly get dalpha_minus
loss = _get_loss(self._model, self._lossfn, x, y)
dalpha_minus = autograd.grad(loss, self._alphas)
        # reset params back to their original values by adding eps*dw
with torch.no_grad():
for p, v in zip(self._model_params(), dw):
p += epsilon * v
# apply eq 8, final difference to compute hessian
h = [(p - m) / (2. * epsilon)
for p, m in zip(dalpha_plus, dalpha_minus)]
return h
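# --- Illustrative sketch (not part of the original module) ---
# The finite-difference identity behind _hessian_vector_product, checked on a
# toy loss f(w, a) = (w * a)^2 where the mixed derivative is known analytically:
# grad_a f = 2 w^2 a, so d/dw(grad_a f) * dw = 4 w a dw.
if __name__ == "__main__":
    w = torch.tensor(1.5)
    a = torch.tensor(0.7, requires_grad=True)
    dw = torch.tensor(0.3)
    eps = 1e-3 / dw.abs()
    def dalpha(w_val):
        loss = (w_val * a) ** 2
        return autograd.grad(loss, a)[0]
    hvp = (dalpha(w + eps * dw) - dalpha(w - eps * dw)) / (2 * eps)
    exact = 4 * w * a.detach() * dw   # analytic mixed derivative times dw
    assert torch.allclose(hvp, exact, atol=1e-3)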
| archai/archai/supergraph/algos/darts/bilevel_optimizer.py/0 | {
"file_path": "archai/archai/supergraph/algos/darts/bilevel_optimizer.py",
"repo_id": "archai",
"token_count": 3742
} | 325 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List
import numpy as np
from archai.supergraph.algos.divnas.wmr import Wmr
class SeqOpt:
""" Implements SeqOpt
TODO: Later on we might want to refactor this class
to be able to handle bandit feedback """
def __init__(self, num_items:int, eps:float):
self._num_items = num_items
# initialize wmr copies
self._expert_algos = [Wmr(self._num_items, eps) for i in range(self._num_items)]
def sample_sequence(self, with_replacement=False)->List[int]:
sel_set = set()
# to keep order information
sel_list = []
counter = 0
counter_limit = 10000
for i in range(self._num_items):
item_id = self._expert_algos[i].sample()
if not with_replacement:
                # NOTE: the counter limit guards against this loop spinning forever
                while item_id in sel_set and counter < counter_limit:
                    item_id = self._expert_algos[i].sample()
                    counter += 1
                if counter >= counter_limit:
                    print('Sampling hit the retry limit; sequence may contain repeats')
sel_set.add(item_id)
sel_list.append(item_id)
return sel_list
def _check_marg_gains(self, reward_storage:List[List[float]])->bool:
reward_array = np.array(reward_storage)
is_descending = True
for i in range(reward_array.shape[1]):
marg_gains_this_item = reward_array[:,i]
is_descending = np.all(np.diff(marg_gains_this_item)<=0)
if not is_descending:
return is_descending
return is_descending
    def _scale_minus_one_to_one(self, rewards: np.ndarray) -> np.ndarray:
scaled = np.interp(rewards, (rewards.min(), rewards.max()), (-1, 1))
return scaled
def update(self, sel_list:List[int], compute_marginal_gain_func)->None:
""" In the full information case we will update
all expert copies according to the marginal benefits """
# mother set
S = set([i for i in range(self._num_items)])
reward_storage = []
# for each slot
for slot_id in range(self._num_items):
# for each action in the slot
sub_sel = set(sel_list[:slot_id])
reward_vector = []
for item in range(self._num_items):
# the function passed in
# must already be bound to the
# covariance function needed
reward = compute_marginal_gain_func(item, sub_sel, S)
reward_vector.append(reward)
# update the expert algo copy for this slot
scaled_rewards = self._scale_minus_one_to_one(np.array(reward_vector))
self._expert_algos[slot_id].update(scaled_rewards)
reward_storage.append(reward_vector)
# # Uncomment to aid in debugging
# np.set_printoptions(precision=3, suppress=True)
# print('Marginal gain array (item_id X slots)')
# print(np.array(reward_storage).T)
# is_descending = self._check_marg_gains(reward_storage)
# if not is_descending:
# print('WARNING marginal gains are not diminishing')
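# --- Illustrative sketch (not part of the original module) ---
# Minimal usage with a toy modular gain; in DivNAS the gain function is bound
# to a covariance kernel instead.
if __name__ == "__main__":
    np.random.seed(0)
    values = np.array([0.1, 0.9, 0.5])
    def toy_gain(item, sub_sel, S):
        # modular reward: each item's value, independent of what is selected
        return float(values[item])
    opt = SeqOpt(num_items=3, eps=0.1)
    for _ in range(50):
        opt.update(opt.sample_sequence(), toy_gain)
    print(opt.sample_sequence())  # item 1 tends to occupy the first slot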
| archai/archai/supergraph/algos/divnas/seqopt.py/0 | {
"file_path": "archai/archai/supergraph/algos/divnas/seqopt.py",
"repo_id": "archai",
"token_count": 1492
} | 326 |
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration flags."""
from __future__ import absolute_import, division, print_function
from absl import flags
FLAGS = flags.FLAGS
# Data flags (only required for generating the dataset)
flags.DEFINE_list(
'train_data_files', [],
'Training data files in TFRecord format. Multiple files can be passed in a'
' comma-separated list. The first file in the list will be used for'
' computing the training error.')
flags.DEFINE_string(
'valid_data_file', '', 'Validation data in TFRecord format.')
flags.DEFINE_string(
'test_data_file', '', 'Testing data in TFRecord format.')
flags.DEFINE_string(
'sample_data_file', '', 'Sampled batch data in TFRecord format.')
flags.DEFINE_string(
'data_format', 'channels_last',
'Data format, one of [channels_last, channels_first] for NHWC and NCHW'
' tensor formats respectively.')
flags.DEFINE_integer(
'num_labels', 10, 'Number of input class labels.')
# Search space parameters.
flags.DEFINE_integer(
'module_vertices', 7,
'Number of vertices in module matrix, including input and output.')
flags.DEFINE_integer(
'max_edges', 9,
'Maximum number of edges in the module matrix.')
flags.DEFINE_list(
'available_ops', ['conv3x3-bn-relu', 'conv1x1-bn-relu', 'maxpool3x3'],
'Available op labels, see base_ops.py for full list of ops.')
# Model hyperparameters. The default values are exactly what is used during the
# exhaustive evaluation of all models.
flags.DEFINE_integer(
'stem_filter_size', 128, 'Filter size after stem convolutions.')
flags.DEFINE_integer(
'num_stacks', 3, 'Number of stacks of modules.')
flags.DEFINE_integer(
'num_modules_per_stack', 3, 'Number of modules per stack.')
flags.DEFINE_integer(
'batch_size', 256, 'Training batch size.')
flags.DEFINE_integer(
'train_epochs', 108,
'Maximum training epochs. If --train_seconds is reached first, training'
' may not reach --train_epochs.')
flags.DEFINE_float(
'train_seconds', 4.0 * 60 * 60,
'Maximum training seconds. If --train_epochs is reached first, training'
' may not reach --train_seconds. Used as safeguard against stalled jobs.'
' If train_seconds is 0.0, no time limit will be used.')
flags.DEFINE_float(
'learning_rate', 0.1,
'Base learning rate. Linearly scaled by --tpu_num_shards.')
flags.DEFINE_string(
'lr_decay_method', 'COSINE_BY_STEP',
'[COSINE_BY_TIME, COSINE_BY_STEP, STEPWISE], see model_builder.py for full'
' list of decay methods.')
flags.DEFINE_float(
'momentum', 0.9, 'Momentum.')
flags.DEFINE_float(
'weight_decay', 1e-4, 'L2 regularization weight.')
flags.DEFINE_integer(
'max_attempts', 5,
'Maximum number of times to try training and evaluating an individual'
' before aborting.')
flags.DEFINE_list(
'intermediate_evaluations', ['0.5'],
'Intermediate evaluations relative to --train_epochs. For example, to'
' evaluate the model at 1/4, 1/2, 3/4 of the total epochs, use [0.25, 0.5,'
' 0.75]. An evaluation is always done at the start and end of training.')
flags.DEFINE_integer(
'num_repeats', 3,
'Number of repeats evaluated for each model in the space.')
# TPU flags
flags.DEFINE_bool(
'use_tpu', True, 'Use TPUs for train and evaluation.')
flags.DEFINE_integer(
'tpu_iterations_per_loop', 100, 'Iterations per loop of TPU execution.')
flags.DEFINE_integer(
'tpu_num_shards', 2,
'Number of TPU shards, a single TPU chip has 2 shards.')
def build_config():
"""Build config from flags defined in this module."""
config = {
flag.name: flag.value
for flag in FLAGS.flags_by_module_dict()[__name__]
}
return config
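# --- Illustrative sketch (not part of the original module) ---
# Flags must be parsed before building the config; run-as-script example:
if __name__ == "__main__":
    import sys
    FLAGS(sys.argv)  # parse absl flags from the command line
    config = build_config()
    print(config["num_stacks"], config["available_ops"])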
| archai/archai/supergraph/algos/nasbench101/config.py/0 | {
"file_path": "archai/archai/supergraph/algos/nasbench101/config.py",
"repo_id": "archai",
"token_count": 1453
} | 327 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import bisect
import math
import os
import random
from enum import Enum
from typing import List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import tensorwatch as tw
import yaml
from tensorwatch import ModelStats
from archai.common import utils
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.nas.model_desc import ModelDesc
from archai.supergraph.utils.metrics import Metrics
logger = get_global_logger()
class JobStage(Enum):
# below values must be assigned in sequence so getting next job stage enum is easy
SEED = 1
SEED_TRAINED = 2
SEARCH = 3
SEARCH_TRAINED = 4
EVAL = 5
EVAL_TRAINED = 6
class ExperimentStage(Enum):
SEARCH = 1
EVAL = 2
class ConvexHullPoint:
def __init__(self, job_stage:JobStage, parent_id:int,
sampling_count:int,
model_desc:ModelDesc,
cells_reductions_nodes:Tuple[int, int, int],
metrics:Optional[Metrics]=None,
model_stats:Optional[tw.ModelStats]=None) -> None:
# we only record points after training
self.job_stage = job_stage
self.parent_id = parent_id
self.sampling_count = sampling_count
self.model_desc = model_desc
self.cells_reductions_nodes = cells_reductions_nodes
self.metrics = metrics
self.model_stats = model_stats
        # TODO: we use random IDs because with ray multiprocessing, it's harder to have
        #   global id generation. Ideally we should use UUID or a global store, but for
        #   now we just use a large enough random range
ConvexHullPoint._id = random.randint(0, 2147483648)
self.id = ConvexHullPoint._id
def is_trained_stage(self)->bool:
return self.job_stage==JobStage.SEARCH_TRAINED or self.job_stage==JobStage.SEED_TRAINED
def next_stage(self)->JobStage:
return JobStage(self.job_stage.value+1)
def _is_on_ray_left(x1, y1, x2, y2, x3, y3, inclusive=False, epsilon=0):
"""
Return whether x3,y3 is on the left side of the ray x1,y1 -> x2,y2.
If inclusive, then the answer is left or on the ray.
If otherwise, then the answer is strictly left.
"""
val = (x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)
if inclusive:
return val >= epsilon
return val > epsilon
def _convex_hull_from_points(xs, ys, eps=None, allow_increase=False):
"""
Andrew's Monotone Chain Algorithm: (https://en.wikipedia.org/wiki/Graham_scan)
Assume the data are sorted in order of xs, then the computation complexity is O(n)
If not sorted, then a sort by x-value is applied first. The complexity becomes O(nlog(n))
Return:
hull_indices (list): indices for the points on the hull exactly
eps_indices (list): indices for the points on the hull + eps tolerance
"""
indices = list(range(len(xs)))
if len(xs) <= 1:
return indices, indices
# check xs sorted
is_monotone = True
for i in range(1, len(xs)):
if xs[i] < xs[i-1]:
is_monotone = False
break
if not is_monotone:
indices.sort(key=lambda i : (xs[i], ys[i]))
def _remove_non_hull_idx(x1, y1, idxs):
while len(idxs) > 1:
x2, y2 = xs[idxs[-1]], ys[idxs[-1]]
x3, y3 = xs[idxs[-2]], ys[idxs[-2]]
if not _is_on_ray_left(x1, y1, x2, y2, x3, y3):
if np.abs(x1 - x2) > 1e-6 or np.abs(y1 - y2) > 1e-6:
                    # this ensures that no points are duplicates
break
del idxs[-1]
return idxs
hull_indices = []
min_y = float('inf')
for idx in indices:
x1, y1 = xs[idx], ys[idx]
min_y = min(y1, min_y)
hull_indices = _remove_non_hull_idx(x1, y1, hull_indices)
hull_indices.append(idx)
if not allow_increase:
# use a fake final point at (2 * x_max , y_min) to remove increasing.
x1, y1 = xs[indices[-1]] * 2, min_y
hull_indices = _remove_non_hull_idx(x1, y1, hull_indices)
# compute epsilon hull (convex hull + (1+eps) band)
eps_indices = hull_indices
if eps is not None and eps > 0:
eps_indices = []
h_idx = 0 # right idx, in the hull_indices
for idx in indices:
x = xs[idx]
y = ys[idx]
if h_idx >= len(hull_indices):
# Larger than the largest model on the hull
#y_interp = min_y
y_interp = ys[hull_indices[-1]]
elif idx == hull_indices[h_idx]:
# critical pts on hull
y_interp = y
x1, y1 = x, y # hull point to left
h_idx += 1
if h_idx < len(hull_indices):
x2, y2 = xs[hull_indices[h_idx]], ys[hull_indices[h_idx]]
else:
#x2, y2 = xs[indices[-1]] * 2, min_y
x2, y2 = xs[indices[-1]] * 2, ys[hull_indices[-1]]
else:
# Between pts of hull
try:
y_interp = y1 + (y2 - y1) / (x2 - x1) * (x - x1)
if np.isnan(y_interp):
y_interp = min(y1, y2)
except:
# numerical issues when x2, x1 are close
y_interp = min(y1, y2)
if y <= y_interp * (1. + eps):
eps_indices.append(idx)
assert x1 <= x and x2 >= x, "idx={} idx[h_idx-1]={} idx[h_idx]={} x={} y={} x1={} x2={} y1={} y2={} y_interp={}".format(\
idx, hull_indices[h_idx-1], hull_indices[h_idx], x, y, x1, x2, y1, y2, y_interp)
return hull_indices, eps_indices
def _test_convex_hull():
# random points,
np.random.seed(19921102)
xs = np.random.uniform(size=100)
ys = np.random.uniform(size=100) - xs + 1.0
eps = np.random.uniform(low=0.0, high=0.3)
# compute eps convex hull.
hull_indices, indices = _convex_hull_from_points(xs, ys, eps=eps)
# plot
import matplotlib.pyplot as plt
plt.close('all')
hull_xs = [xs[i] for i in indices]
hull_ys = [ys[i] for i in indices]
bound_xs = [xs[i] for i in hull_indices]
bound_ys = [ys[i] * (1+eps) for i in hull_indices]
plt.plot(bound_xs, bound_ys, c='red', label='eps-bound')
plt.scatter(xs, ys, label='pts')
plt.scatter(hull_xs, hull_ys, c='black', marker='+', label='eps-hull')
plt.show()
#plt.savefig(os.path.join('./temp', 'debug', 'convex_hull.png'),
# dpi=plt.gcf().dpi, bbox_inches='tight')
def _convex_hull_insert(hull_xs, hull_ys, x, y, eps=0.0, eg_xs=[], eg_ys=[]):
"""
    Insert a new point (x, y) into a lower convex hull defined by
hull_xs and hull_ys.
Assume hull_xs are sorted (increasing).
returns:
remove_slice (slice or None) : None if no insert. slice(left, right) if to
remove the indices left:right
"""
    assert y >= 0, "eps-greedy convex hull insertion needs all y >= 0"
# right most point is a fake point (inf, min_y), so the curve is decreasing.
# left most point is a fake point (0, inf), so that the cheapest is always kept.
n_xs = len(hull_xs)
if n_xs == 0:
# always insert
return slice(0, 0)
min_y = np.min(hull_ys)
idx = bisect.bisect_left(hull_xs, x)
if idx == n_xs:
# right most (also covers the n_xs == 1)
hull_y = min_y
elif hull_xs[idx] == x:
hull_y = hull_ys[idx]
elif idx == 0:
# left most
hull_y = y * 1.1
if hull_y == 0:
hull_y = 1.0
else:
y1, y2 = hull_ys[idx-1:idx+1]
x1, x2 = hull_xs[idx-1:idx+1]
# linear interpolate
hull_y = y1 + (y2 - y1) / (x2 - x1) * (x - x1)
diff = ( y - hull_y ) / hull_y
if diff > eps:
# do not insert
return None
if diff >= 0:
# we insert b/c of eps-greedy. Or there are three points on the same line.
return slice(n_xs, n_xs)
# now diff < 0
# fix left side
slice_left = idx
for li in reversed(range(1, idx)):
x3, x2 = hull_xs[li-1:li+1]
y3, y2 = hull_ys[li-1:li+1]
to_rm_li = _is_on_ray_left(x, y, x2, y2, x3, y3, inclusive=False)
if to_rm_li:
slice_left = li
else:
break
# fix right side
slice_right = idx
min_y = min(y, min_y)
for li in range(idx, n_xs):
if li < n_xs - 1:
x2, x3 = hull_xs[li:li+2]
y2, y3 = hull_ys[li:li+2]
else:
x2, x3 = hull_xs[li], hull_xs[li] * 2
y2, y3 = hull_ys[li], min_y
to_rm_li = not _is_on_ray_left(x, y, x2, y2, x3, y3, inclusive=True)
if to_rm_li:
slice_right = li + 1
else:
break
# TODO epsilon greedy
return slice(slice_left, slice_right)
def _test_convex_hull_insert():
import argparse
import matplotlib.pyplot as plt
plt.close('all')
parser = argparse.ArgumentParser()
parser.add_argument('--xs', type=str)
parser.add_argument('--ys', type=str)
parser.add_argument('-x', type=float)
parser.add_argument('-y', type=float)
args = parser.parse_args()
y_max = 1.0
y_min = 0.01
def _random_convex_perf_curve(n):
slopes = np.random.uniform(low=-3, high=-1e-2, size=n-1)
slopes.sort()
delta_ys = np.random.uniform(size=n-1)
delta_ys = delta_ys / np.sum(delta_ys) * (-y_max + y_min)
delta_xs = delta_ys / slopes
xs = [1] + list(np.cumsum(delta_xs) + 1)
ys = [y_max] + list(np.cumsum(delta_ys) + y_max)
return xs, ys
if args.xs and args.ys:
xs = list(map(float, args.xs.split(',')))
ys = list(map(float, args.ys.split(',')))
else:
xs, ys = _random_convex_perf_curve(8)
print(xs)
print(ys)
plt.plot(xs + [xs[-1] * 2.0], ys + [y_min], color='r', marker='x')
n = len(xs)
scater_xs, scater_ys = [], []
if args.x and args.y:
x = args.x
y = args.y
scater_xs.append(x)
scater_ys.append(y)
locs = [0, 1, n//2, n-2, n-1]
for i in locs:
if i < n-1:
x = (xs[i] + xs[i+1]) / 2.
y = max(y_min / 2, min(y_max, (1 + np.random.uniform(-0.9, -0.3)) * (ys[i] + ys[i+1]) / 2.))
else:
x = xs[i] * 1.5
y = max(y_min / 2, min(y_max, (1 + np.random.uniform(-0.9, -0.3)) * ys[i]))
scater_xs.append(x)
scater_ys.append(y)
x = (2 * xs[-1] + xs[-2]) / 3
y = y_min / 2
scater_xs.append(x)
scater_ys.append(y)
for x, y in zip(scater_xs, scater_ys):
ret = _convex_hull_insert(xs, ys, x, y)
print("x={} y={} ret={}".format(x, y, ret))
plt.scatter(scater_xs, scater_ys)
plt.savefig(os.path.join('./temp', 'debug', 'convex_hull_insert.png'),
dpi=plt.gcf().dpi, bbox_inches='tight')
def model_descs_on_front(hull_points:List[ConvexHullPoint], convex_hull_eps:float,
stage:ExperimentStage, lower_hull:bool=True)\
->Tuple[List[ConvexHullPoint], List[ConvexHullPoint], List[float], List[float]]:
assert(len(hull_points) > 0)
top1_list = get_top1_for_stage(hull_points, stage)
xs = [point.model_stats.MAdd for point in hull_points]
ys = [1.0-top1 if lower_hull else top1 for top1 in top1_list]
hull_indices, eps_indices = _convex_hull_from_points(xs, ys, eps=convex_hull_eps)
eps_points = [hull_points[i] for i in eps_indices]
front_points = [hull_points[i] for i in hull_indices]
return front_points, eps_points, xs, ys
def hull_points2tsv(points:List[ConvexHullPoint])->str:
lines = ['\t'.join(['id', 'job_stage',
'cells', 'reductions', 'nodes',
'MAdd', 'flops', 'duration',
'mread', 'mwrite',
'inference_memory', 'parameters',
'train_best_epoch', 'train_best_top1',
'val_best_epoch', 'val_best_top1',
'test_best_epoch', 'test_best_top1',
                        'parent_id', 'sampling_count'])]
for p in points:
cells, reductions, nodes = p.cells_reductions_nodes
mstats, metrics = p.model_stats, p.metrics
vals = []
vals.extend([p.id, JobStage(p.job_stage).name])
# add macro
vals.extend([cells, reductions, nodes])
# add model stats
vals.extend([mstats.MAdd, mstats.Flops, mstats.duration])
vals.extend([mstats.mread, mstats.mwrite])
vals.extend([mstats.inference_memory, mstats.parameters])
# add metrics
train_metrics, val_metrics, test_metrics = metrics.run_metrics.best_epoch()
vals.extend([train_metrics.index, train_metrics.top1.avg])
if val_metrics:
vals.extend([val_metrics.index, val_metrics.top1.avg])
else:
vals.extend([math.nan, math.nan])
if test_metrics:
vals.extend([test_metrics.index, test_metrics.top1.avg])
else:
vals.extend([math.nan, math.nan])
# other attributes
vals.extend([p.parent_id, p.sampling_count])
line = '\t'.join([str(v) for v in vals])
lines.append(line)
return '\n'.join(lines)
def sample_from_hull(hull_points:List[ConvexHullPoint], convex_hull_eps:float,
stage:ExperimentStage=ExperimentStage.SEARCH)->ConvexHullPoint:
front_points, eps_points, xs, ys = model_descs_on_front(hull_points,
convex_hull_eps, stage)
logger.info(f'num models in pool: {len(hull_points)}')
logger.info(f'num models on front: {len(front_points)}')
logger.info(f'num models on front with eps: {len(eps_points)}')
    # form scores to non-maxima suppress models already sampled
counts = [point.sampling_count for point in eps_points]
counts_max = max(counts)
counts_min = min(counts)
if counts_max == counts_min:
counts_range = counts_max
else:
counts_range = counts_max - counts_min
# to prevent division by 0
if counts_range == 0:
counts_range = 1
# scale between [0,1] to avoid numerical issues
scaled_counts = [(count - counts_min)/counts_range for count in counts]
count_scores = [1.0/(scaled_count + 1) for scaled_count in scaled_counts]
# form scores to sample inversely proportional to madds
# since it takes less compute to train a smaller model
# this allows us to evaluate each point equal number of times
# with any compute budget
eps_madds = [point.model_stats.MAdd for point in eps_points]
madd_max = max(eps_madds)
madd_min = min(eps_madds)
if madd_max == madd_min:
madd_range = madd_max
else:
madd_range = madd_max - madd_min
# to prevent division by 0
if madd_range == 0:
madd_range = 1
# scale between [0,1] to avoid numerical issues
scaled_madds = [(madd - madd_min)/madd_range for madd in eps_madds]
madd_scores = [1.0/(scaled_madd + 1) for scaled_madd in scaled_madds]
overall_scores = np.array(count_scores) + np.array(madd_scores)
overall_scores = overall_scores / np.sum(overall_scores)
sampled_point = np.random.choice(eps_points, p=overall_scores)
sampled_point.sampling_count += 1
return sampled_point
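# --- Worked example (illustrative, not part of the original module) ---
# Scoring sketch for two eps-front points with sampling counts [0, 4] and
# MAdds [1e6, 9e6]:
#   scaled_counts = [0.0, 1.0]  ->  count_scores = [1.0, 0.5]
#   scaled_madds  = [0.0, 1.0]  ->  madd_scores  = [1.0, 0.5]
#   overall       = [2.0, 1.0] / 3.0  ->  sampling probs [0.667, 0.333]
# i.e. the cheaper, less-sampled model is drawn twice as often.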
def get_top1_for_stage(hull_points:List[ConvexHullPoint], stage:ExperimentStage)->List[float]:
'''Return top1 accuracy according to the experiment stage (SEARCH or EVAL)'''
if stage == ExperimentStage.SEARCH:
top1_list = [point.metrics.best_val_top1() for point in hull_points]
else:
top1_list = [point.metrics.best_test_top1() for point in hull_points]
return top1_list
def plot_frontier(hull_points:List[ConvexHullPoint], convex_hull_eps:float,
expdir:str, stage:ExperimentStage=ExperimentStage.SEARCH)->None:
front_points, eps_points, xs, ys = model_descs_on_front(hull_points,
convex_hull_eps, stage)
top1_list_front = get_top1_for_stage(front_points, stage)
top1_list_eps = get_top1_for_stage(eps_points, stage)
# save a plot of the convex hull to aid debugging
hull_xs = [p.model_stats.MAdd for p in eps_points]
hull_ys = [1.0-top1 for top1 in top1_list_eps]
bound_xs = [p.model_stats.MAdd for p in front_points]
bound_ys = [(1.0-top1) * (1+convex_hull_eps) for top1 in top1_list_front]
# for easier interpretation report everything in million increments
xs_m = [x/1e6 for x in xs]
hull_xs_m = [x/1e6 for x in hull_xs]
bound_xs_m = [x/1e6 for x in bound_xs]
plt.clf()
plt.plot(bound_xs_m, bound_ys, c='red', label='eps-bound')
plt.scatter(xs_m, ys, label='pts')
plt.scatter(hull_xs_m, hull_ys, c='black', marker='+', label='eps-hull')
plt.xlabel('Multiply-Additions (Millions)')
plt.ylabel('Top1 Error')
plt.savefig(os.path.join(expdir, 'convex_hull.png'),
dpi=plt.gcf().dpi, bbox_inches='tight')
def plot_pool(hull_points:List[ConvexHullPoint], expdir:str,
stage:ExperimentStage=ExperimentStage.SEARCH)->None:
assert(len(hull_points) > 0)
xs_madd = []
xs_flops = []
xs_params = []
ys = get_top1_for_stage(hull_points, stage)
for p in hull_points:
xs_madd.append(p.model_stats.MAdd)
xs_flops.append(p.model_stats.Flops)
xs_params.append(p.model_stats.parameters)
# for easier interpretation report everything in million increments
xs_madd_m = [x/1e6 for x in xs_madd]
xs_flops_m = [x/1e6 for x in xs_flops]
xs_params_m = [x/1e6 for x in xs_params]
madds_plot_filename = os.path.join(expdir, 'model_gallery_accuracy_madds.png')
plt.clf()
plt.scatter(xs_madd_m, ys)
plt.xlabel('Multiply-Additions (Millions)')
plt.ylabel('Top1 Accuracy')
plt.savefig(madds_plot_filename, dpi=plt.gcf().dpi, bbox_inches='tight')
flops_plot_filename = os.path.join(expdir, 'model_gallery_accuracy_flops.png')
plt.clf()
plt.scatter(xs_flops_m, ys)
plt.xlabel('Flops (Millions)')
plt.ylabel('Top1 Accuracy')
plt.savefig(flops_plot_filename, dpi=plt.gcf().dpi, bbox_inches='tight')
params_plot_filename = os.path.join(expdir, 'model_gallery_accuracy_params.png')
plt.clf()
plt.scatter(xs_params_m, ys)
plt.xlabel('Params (Millions)')
plt.ylabel('Top1 Accuracy')
plt.savefig(params_plot_filename, dpi=plt.gcf().dpi, bbox_inches='tight')
def plot_seed_model_stats(seed_model_stats:List[ModelStats], expdir:str)->None:
xs_madd = [p.MAdd for p in seed_model_stats]
xs_madd_m = [x/1e6 for x in xs_madd]
ys_zero = [0 for x in xs_madd]
madds_plot_filename = os.path.join(expdir, 'seed_models_madds.png')
plt.clf()
plt.scatter(xs_madd_m, ys_zero)
plt.xlabel('Multiply-Additions (Millions)')
plt.savefig(madds_plot_filename, dpi=plt.gcf().dpi, bbox_inches='tight')
def save_hull_frontier(hull_points:List[ConvexHullPoint], convex_hull_eps:float,
final_desc_foldername:str, expdir:str, stage:ExperimentStage=ExperimentStage.SEARCH)->ConvexHullPoint:
# make folder to save gallery of models after search
final_desc_dir = utils.full_path(final_desc_foldername, create=True)
# save the front on hull
front_points, eps_points, xs, ys = model_descs_on_front(hull_points,
convex_hull_eps,
stage)
for i, eps_point in enumerate(eps_points):
# save readable model desc yaml
eps_point.model_desc.save(os.path.join(final_desc_dir, f'model_desc_{i}.yaml'))
# save hull point
eps_point.model_desc.clear_trainables() # make file lightweight
utils.write_string(os.path.join(final_desc_dir, f'hull_{i}.yaml'), yaml.dump(eps_point))
front_summary_filepath = os.path.join(expdir, 'pareto_front_summary.tsv')
utils.write_string(front_summary_filepath, hull_points2tsv(front_points))
eps_summary_filepath = os.path.join(expdir, 'pareto_eps_summary.tsv')
utils.write_string(eps_summary_filepath, hull_points2tsv(eps_points))
xy_filepath = os.path.join(expdir, 'pareto_xy.tsv')
utils.write_string(xy_filepath, '\n'.join([str(x)+'\t'+str(y) \
for x,y in utils.zip_eq(xs, ys)]))
# return last model as best performing
return eps_points[-1]
def save_hull(hull_points:List[ConvexHullPoint], expdir:str)->None:
full_pool_filepath = os.path.join(expdir, 'full_pool.tsv')
utils.write_string(full_pool_filepath, hull_points2tsv(hull_points)) | archai/archai/supergraph/algos/petridish/petridish_utils.py/0 | {
"file_path": "archai/archai/supergraph/algos/petridish/petridish_utils.py",
"repo_id": "archai",
"token_count": 10156
} | 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional, Union
from torch.utils.data import ConcatDataset, Dataset, Subset
class LimitDataset(Dataset):
def __init__(self, dataset, n):
self.dataset = dataset
self.n = n
if hasattr(dataset, 'targets'):
self.targets = dataset.targets[:n]
def __len__(self):
return self.n
def __getitem__(self, i):
return self.dataset[i]
DatasetLike = Optional[Union[Dataset, Subset, ConcatDataset, LimitDataset]]
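# --- Illustrative sketch (not part of the original module) ---
# Cap any indexable dataset for quick smoke tests:
if __name__ == "__main__":
    full = list(range(100))           # any object with __len__/__getitem__
    small = LimitDataset(full, n=10)
    print(len(small), small[3])       # 10 3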
| archai/archai/supergraph/datasets/limit_dataset.py/0 | {
"file_path": "archai/archai/supergraph/datasets/limit_dataset.py",
"repo_id": "archai",
"token_count": 233
} | 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torchvision
from overrides import overrides
from torch.utils.data import ConcatDataset
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class SvhnProvider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',
download=True, transform=transform_train)
extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',
download=True, transform=transform_train)
trainset = ConcatDataset([trainset, extraset])
if load_test:
testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',
download=True, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
MEAN = [0.4914, 0.4822, 0.4465]
STD = [0.2023, 0.1994, 0.20100]
transf = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()
]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(transf + normalize)
test_transform = transforms.Compose(normalize)
return train_transform, test_transform
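# --- Illustrative sketch (not part of the original module) ---
# Providers are normally built from a Config section; assuming the base class
# accepts any mapping with a 'dataroot' key, usage looks like:
#
#   provider = SvhnProvider({'dataroot': '~/dataroot'})
#   train_tf, test_tf = provider.get_transforms(img_size=32)
#   trainset, testset = provider.get_datasets(True, True, train_tf, test_tf)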
register_dataset_provider('svhn', SvhnProvider) | archai/archai/supergraph/datasets/providers/svhn_provider.py/0 | {
"file_path": "archai/archai/supergraph/datasets/providers/svhn_provider.py",
"repo_id": "archai",
"token_count": 850
} | 330 |
import os
import torch
import torch.nn as nn
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
def __init__(self, features, num_classes=10, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, device, **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
script_dir = os.path.dirname(__file__)
state_dict = torch.load(script_dir + '/state_dicts/'+arch+'.pt', map_location=device)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, device='cpu', **kwargs):
    """VGG 11-layer model (configuration "A")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, device, **kwargs)
def vgg11_bn(pretrained=False, progress=True, device='cpu', **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, device, **kwargs)
def vgg13(pretrained=False, progress=True, device='cpu', **kwargs):
    """VGG 13-layer model (configuration "B")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, device, **kwargs)
def vgg13_bn(pretrained=False, progress=True, device='cpu', **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, device, **kwargs)
def vgg16(pretrained=False, progress=True, device='cpu', **kwargs):
    """VGG 16-layer model (configuration "D")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, device, **kwargs)
def vgg16_bn(pretrained=False, progress=True, device='cpu', **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, device, **kwargs)
def vgg19(pretrained=False, progress=True, device='cpu', **kwargs):
    """VGG 19-layer model (configuration "E")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, device, **kwargs)
def vgg19_bn(pretrained=False, progress=True, device='cpu', **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, device, **kwargs)
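# --- Illustrative sketch (not part of the original module) ---
# Random-weight instantiation; pretrained=True additionally requires the
# matching checkpoint under ./state_dicts/.
if __name__ == "__main__":
    model = vgg11_bn(pretrained=False, num_classes=10)
    x = torch.randn(1, 3, 32, 32)
    print(model(x).shape)  # torch.Size([1, 10])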
| archai/archai/supergraph/models/vgg.py/0 | {
"file_path": "archai/archai/supergraph/models/vgg.py",
"repo_id": "archai",
"token_count": 2552
} | 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
from overrides import overrides
from torch import nn
from archai.supergraph.nas.finalizers import Finalizers
from archai.supergraph.nas.model_desc import EdgeDesc, NodeDesc
from archai.supergraph.nas.operations import Zero
class RandomFinalizers(Finalizers):
@overrides
def finalize_node(self, node:nn.ModuleList, node_index:int,
node_desc:NodeDesc, max_final_edges:int,
*args, **kwargs)->NodeDesc:
# get total number of ops incoming to this node
in_ops = [(edge,op) for edge in node \
for op, order in edge._op.ops()
if not isinstance(op, Zero)]
assert len(in_ops) >= max_final_edges
selected = random.sample(in_ops, max_final_edges)
# finalize selected op, select 1st value from return which is op finalized desc
selected_edges = [EdgeDesc(s[1].finalize()[0], s[0].input_ids) \
for s in selected]
return NodeDesc(selected_edges, node_desc.conv_params)
| archai/archai/supergraph/nas/random_finalizers.py/0 | {
"file_path": "archai/archai/supergraph/nas/random_finalizers.py",
"repo_id": "archai",
"token_count": 480
} | 332 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional, Union
from overrides import overrides
from pytorch_lightning import LightningDataModule, LightningModule
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.utilities.types import (
_EVALUATE_OUTPUT,
_PREDICT_OUTPUT,
EVAL_DATALOADERS,
TRAIN_DATALOADERS,
)
from archai.api.trainer_base import TrainerBase
class PlTrainer(Trainer, TrainerBase):
"""PyTorch-Lightning trainer."""
@overrides
def train(
self,
model: LightningModule,
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
return self.fit(
model,
train_dataloaders=train_dataloaders,
val_dataloaders=val_dataloaders,
datamodule=datamodule,
ckpt_path=ckpt_path,
)
@overrides
def evaluate(
self,
model: Optional[LightningModule] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: Optional[bool] = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
return self.test(
model=model, dataloaders=dataloaders, ckpt_path=ckpt_path, verbose=verbose, datamodule=datamodule
)
@overrides
def predict(
self,
model: Optional[LightningModule] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
        # Needs to call the method directly from the class to avoid infinite recursion
        # due to the same method name in TrainerBase
return Trainer.predict(
self,
model=model,
dataloaders=dataloaders,
datamodule=datamodule,
return_predictions=return_predictions,
ckpt_path=ckpt_path,
)
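# --- Illustrative sketch (not part of the original module) ---
# PlTrainer is a drop-in pytorch_lightning.Trainer that also satisfies Archai's
# TrainerBase interface; assuming `MyLitModel` and the dataloaders exist:
#
#   trainer = PlTrainer(max_epochs=1, accelerator="cpu")
#   trainer.train(MyLitModel(), train_dataloaders=train_loader)
#   trainer.evaluate(dataloaders=val_loader)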
| archai/archai/trainers/cv/pl_trainer.py/0 | {
"file_path": "archai/archai/trainers/cv/pl_trainer.py",
"repo_id": "archai",
"token_count": 1029
} | 333 |
__include__: 'darts.yaml' # just use darts defaults
| archai/confs/aug/aug_cifar.yaml/0 | {
"file_path": "archai/confs/aug/aug_cifar.yaml",
"repo_id": "archai",
"token_count": 17
} | 334 |
name: sample-nas-env
channels:
- conda-forge
- pytorch
- nvidia
dependencies:
- python=3.10
- pip
- pip:
- azure-ai-ml==1.5.0
- azure-storage-blob
- azure-data-tables
- azure-identity
- azureml-mlflow
- matplotlib
- mldesigner
- mlflow
- torch
- torchvision
- torchaudio
- "archai[dev] @ git+https://github.com/microsoft/archai.git" | archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/conda.yaml/0 | {
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/conda.yaml",
"repo_id": "archai",
"token_count": 186
} | 335 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import torch
import json
import time
import os
from archai.discrete_search.api import SearchObjectives
from archai.discrete_search.evaluators import AvgOnnxLatency, TorchFlops
from archai.discrete_search.evaluators import TorchNumParameters
from archai.discrete_search.algos import EvolutionParetoSearch
from archai.discrete_search.search_spaces.config import ArchParamTree, ConfigSearchSpace, DiscreteChoice
from archai.discrete_search.api.search_space import EvolutionarySearchSpace
from aml_training_evaluator import AmlTrainingValAccuracy
from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
from azure.identity import DefaultAzureCredential
from azure.ai.ml import MLClient
from model import MyModel
def search(amlEvaluator: AmlTrainingValAccuracy,
space: EvolutionarySearchSpace,
local_output : str,
init_num_models : int,
iterations: int,
max_unseen_population : int):
""" Run an Archai EvolutionParetoSearch over the given search space, and search options, optimizing the model
using objectives for ONNX Latency, FLOPs, and the AmlTrainingValAccuracy model evaluator. """
# create our search objectives
search_objectives = SearchObjectives()
search_objectives.add_constraint(
'Number of parameters',
TorchNumParameters(),
constraint=(0.0, 1e6)
)
search_objectives.add_objective(
# Objective function name (will be used in plots and reports)
name='ONNX Latency (ms)',
# ModelEvaluator object that will be used to evaluate the model
model_evaluator=AvgOnnxLatency(input_shape=(1, 1, 28, 28), num_trials=3, device='cpu'),
# Optimization direction, `True` for maximization or `False` for minimization
higher_is_better=False,
# Whether this objective should be considered 'compute intensive' or not.
compute_intensive=False
)
search_objectives.add_objective(
name='FLOPs',
model_evaluator=TorchFlops(torch.randn(1, 1, 28, 28)),
higher_is_better=False,
compute_intensive=False,
# We may optionally add a constraint.
# Architectures outside this range will be ignored by the search algorithm
constraint=(0.0, 1e9)
)
search_objectives.add_objective(
name='AmlTrainingValAccuracy',
model_evaluator=amlEvaluator,
higher_is_better=True,
compute_intensive=True
)
algo = EvolutionParetoSearch(
space,
search_objectives,
None,
local_output,
num_iters=iterations,
max_unseen_population=max_unseen_population,
init_num_models=init_num_models,
seed=int(time.time()),
save_pareto_model_weights=False # we are training elsewhere, can't do this locally!
)
return algo.search()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, help="path to prepared dataset")
parser.add_argument("--environment", type=str, help="name of AML environment to run the partial training jobs in")
parser.add_argument("--experiment", type=str, help="name of AML experiment to use")
parser.add_argument("--compute", type=str, help="name of AML compute to run the partial training jobs on")
parser.add_argument('--config', type=str, help='bin hexed config json info for MLClient')
parser.add_argument("--output_dir", type=str, help="path to output data")
parser.add_argument("--local_output", type=str, help="optional path to local output data (default output_dir)")
parser.add_argument("--search_iterations", type=int, default=10, help="Number of evolutionary search iterations")
parser.add_argument("--init_num_models", type=int, default=10, help="Number of initial models to evaluate")
parser.add_argument("--max_unseen_population", type=int, default=10, help="Maximum number of unseen models to evaluate in each iteration.")
parser.add_argument("--partial_training_epochs", type=float, default=0.01, help="Number of epochs for partial training")
# parser.add_argument("--full_training_epochs", type=float, default=10, help="Number of epochs for final training")
args = parser.parse_args()
environment_name = args.environment
experiment_name = args.experiment
compute_name = args.compute
data_dir = args.data_dir
output_dir = args.output_dir
init_num_models = args.init_num_models
partial_training_epochs = args.partial_training_epochs
iterations = args.search_iterations
max_unseen_population = args.max_unseen_population
print("Starting search with: ")
print(f"Environment: {environment_name}")
print(f"Compute: {compute_name}")
print(f"Data dir: {data_dir}")
print(f"Output dir: {output_dir}")
identity = AzureMLOnBehalfOfCredential()
if args.config:
print("Using AzureMLOnBehalfOfCredential...")
workspace_config = str(bytes.fromhex(args.config), encoding='utf-8')
print(f"Config: {workspace_config}")
config = json.loads(workspace_config)
else:
print("Using DefaultAzureCredential...")
config_file = "../.azureml/config.json"
print(f"Config: {config_file}")
config = json.load(open(config_file, 'r'))
identity = DefaultAzureCredential()
# setup the ConfigSearchSpace from given ArchParamTree configuration.
arch_param_tree = ArchParamTree({
'nb_layers': DiscreteChoice(list(range(1, 13))),
'kernel_size': DiscreteChoice([1, 3, 5, 7]),
'hidden_dim': DiscreteChoice([16, 32, 64, 128])
})
space = ConfigSearchSpace(MyModel, arch_param_tree, mutation_prob=0.3)
    # Make sure we have permission to access the ml_client; this will be needed in the
    # AmlTrainingValAccuracy evaluator so it can create child pipelines.
subscription = config['subscription_id']
resource_group = config['resource_group']
workspace_name = config['workspace_name']
ml_client = MLClient(
identity,
subscription,
resource_group,
workspace_name
)
ml_client.datastores.get('datasets')
print(f"Successfully found dataset from workspace {workspace_name} in resource group {resource_group}")
local_output = args.local_output
if not local_output:
local_output = args.output_dir
amlEvaluator = AmlTrainingValAccuracy(config=config,
compute_cluster_name=compute_name,
environment_name=environment_name, # AML environment name
datastore_path=data_dir, # AML datastore path
models_path=output_dir,
local_output=local_output,
experiment_name=experiment_name,
ml_client=ml_client,
save_models=False, # these partially trained models are not useful
partial_training=True,
training_epochs=partial_training_epochs)
results = search(amlEvaluator, space, local_output, init_num_models, iterations, max_unseen_population)
pareto = results.get_pareto_frontier()["models"]
pareto_models = []
for m in pareto:
config = m.metadata['config']
m.arch = MyModel(config)
d = config.to_dict()
id = str(m.archid)
d['archid'] = id
pareto_models += [d]
with open(os.path.join(local_output, 'pareto.json'), 'w') as f:
f.write(json.dumps(pareto_models, indent=2))
if __name__ == "__main__":
main()
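# --- Illustrative invocation (not part of the original script) ---
# All resource names below are placeholders for your own AML setup:
#
#   python search.py --data_dir datasets/mnist --environment my-aml-env \
#       --experiment mnist-nas --compute nas-cluster --output_dir outputs \
#       --search_iterations 5 --init_num_models 8 --max_unseen_population 8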
| archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/search.py/0 | {
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/search.py",
"repo_id": "archai",
"token_count": 3129
} | 336 |
Natural Language Processing
===========================
.. toctree::
:maxdepth: 2
Fast HF Dataset Provider <nlp/fast_hf_dataset_provider.ipynb>
HF Dataset Provider <nlp/hf_dataset_provider.ipynb>
HF Trainer <nlp/hf_trainer.ipynb>
NVIDIA Dataset Provider <nlp/nvidia_dataset_provider.ipynb>
NVIDIA Trainer <nlp/nvidia_trainer.ipynb>
ONNX Export <nlp/onnx_export.ipynb>
PyTorch Quantization <nlp/torch_quantization.ipynb>
Transformer++ Search Space <nlp/tfpp_ss.ipynb>
| archai/docs/getting_started/notebooks/nlp.rst/0 | {
"file_path": "archai/docs/getting_started/notebooks/nlp.rst",
"repo_id": "archai",
"token_count": 206
} | 337 |
API
===
Dataset Provider
----------------
.. automodule:: archai.api.dataset_provider
:members:
:undoc-members:
Trainer (Base Class)
--------------------
.. automodule:: archai.api.trainer_base
:members:
:undoc-members:
| archai/docs/reference/api/archai.api.rst/0 | {
"file_path": "archai/docs/reference/api/archai.api.rst",
"repo_id": "archai",
"token_count": 86
} | 338 |
Configuration-Based
===================
Architecture Configuration
--------------------------
.. automodule:: archai.discrete_search.search_spaces.config.arch_config
:members:
:undoc-members:
Architecture Parameter Tree
---------------------------
.. automodule:: archai.discrete_search.search_spaces.config.arch_param_tree
:members:
:undoc-members:
Discrete Choice
---------------
.. automodule:: archai.discrete_search.search_spaces.config.discrete_choice
:members:
:undoc-members:
Helpers
-------
.. automodule:: archai.discrete_search.search_spaces.config.helpers
:members:
:undoc-members:
Search Space
------------
.. automodule:: archai.discrete_search.search_spaces.config.search_space
:members:
:undoc-members:
Utilities
---------
.. automodule:: archai.discrete_search.search_spaces.config.utils
:members:
:undoc-members:
| archai/docs/reference/api/archai.discrete_search.search_spaces.config.rst/0 | {
"file_path": "archai/docs/reference/api/archai.discrete_search.search_spaces.config.rst",
"repo_id": "archai",
"token_count": 292
} | 339 |
Gumbel-Softmax
==============
Architecture Trainer
--------------------
.. automodule:: archai.supergraph.algos.gumbelsoftmax.gs_arch_trainer
:members:
:undoc-members:
Experiment Runner
-----------------
.. automodule:: archai.supergraph.algos.gumbelsoftmax.gs_exp_runner
:members:
:undoc-members:
Finalizers
----------
.. automodule:: archai.supergraph.algos.gumbelsoftmax.gs_finalizers
:members:
:undoc-members:
Model Description Builder
-------------------------
.. automodule:: archai.supergraph.algos.gumbelsoftmax.gs_model_desc_builder
:members:
:undoc-members:
Operators
---------
.. automodule:: archai.supergraph.algos.gumbelsoftmax.gs_op
:members:
:undoc-members:
| archai/docs/reference/api/archai.supergraph.algos.gumbelsoftmax.rst/0 | {
"file_path": "archai/docs/reference/api/archai.supergraph.algos.gumbelsoftmax.rst",
"repo_id": "archai",
"token_count": 256
} | 340 |
Trainers
========
.. toctree::
:maxdepth: 2
archai.trainers.cv
archai.trainers.nlp
Coin-Betting Optimizer
----------------------
.. automodule:: archai.trainers.coin_betting_optimizer
:members:
:undoc-members:
Cyclic Cosine Scheduler
-----------------------
.. automodule:: archai.trainers.cyclic_cosine_scheduler
:members:
:undoc-members:
Gradual Warmup Scheduler
------------------------
.. automodule:: archai.trainers.gradual_warmup_scheduler
:members:
:undoc-members:
LAMB Optimizer
--------------
.. automodule:: archai.trainers.lamb_optimizer
:members:
:undoc-members:
Losses
------
.. automodule:: archai.trainers.losses
:members:
:undoc-members:
| archai/docs/reference/api/archai.trainers.rst/0 | {
"file_path": "archai/docs/reference/api/archai.trainers.rst",
"repo_id": "archai",
"token_count": 262
} | 341 |