python_code | repo_name | file_path
---|---|---|
| allennlp-master | tests/tutorials/__init__.py |
| allennlp-master | tests/tutorials/tagger/__init__.py |
import pytest
from allennlp.common.testing import AllenNlpTestCase
@pytest.mark.skip("makes test-install fail (and also takes 30 seconds)")
class TestBasicAllenNlp(AllenNlpTestCase):
@classmethod
def test_run_as_script(cls):
# Just ensure the tutorial runs without throwing an exception.
import tutorials.tagger.basic_allennlp # noqa
| allennlp-master | tests/tutorials/tagger/basic_allennlp_test.py |
import argparse
import csv
import io
import json
import os
import pathlib
import shutil
import sys
import tempfile
import pytest
from allennlp.commands import main
from allennlp.commands.predict import Predict
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import JsonDict, push_python_path
from allennlp.data.dataset_readers import DatasetReader, TextClassificationJsonReader
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor, TextClassifierPredictor
class TestPredict(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.classifier_model_path = (
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
self.classifier_data_path = (
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus.jsonl"
)
self.tempdir = pathlib.Path(tempfile.mkdtemp())
self.infile = self.tempdir / "inputs.txt"
self.outfile = self.tempdir / "outputs.txt"
def test_add_predict_subparser(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
Predict().add_subparser(subparsers)
kebab_args = [
"predict", # command
"/path/to/archive", # archive
"/dev/null", # input_file
"--output-file",
"/dev/null",
"--batch-size",
"10",
"--cuda-device",
"0",
"--silent",
]
args = parser.parse_args(kebab_args)
assert args.func.__name__ == "_predict"
assert args.archive_file == "/path/to/archive"
assert args.output_file == "/dev/null"
assert args.batch_size == 10
assert args.cuda_device == 0
assert args.silent
def test_works_with_known_model(self):
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--silent",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
for result in results:
assert set(result.keys()) == {"label", "logits", "probs", "tokens", "token_ids"}
shutil.rmtree(self.tempdir)
def test_using_dataset_reader_works_with_known_model(self):
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--silent",
"--use-dataset-reader",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 3
for result in results:
assert set(result.keys()) == {"label", "logits", "loss", "probs", "tokens", "token_ids"}
shutil.rmtree(self.tempdir)
def test_uses_correct_dataset_reader(self):
# We're going to use a fake predictor for this test, just checking that we loaded the
# correct dataset reader. We'll also create a fake dataset reader that subclasses the
# expected one, and specify that one for validation.
@Predictor.register("test-predictor")
class _TestPredictor(Predictor):
def dump_line(self, outputs: JsonDict) -> str:
data = {"dataset_reader_type": type(self._dataset_reader).__name__} # type: ignore
return json.dumps(data) + "\n"
def load_line(self, line: str) -> JsonDict:
raise NotImplementedError
@DatasetReader.register("fake-reader")
class FakeDatasetReader(TextClassificationJsonReader):
pass
# With only the --use-dataset-reader argument, the validation dataset reader should be used
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
"--use-dataset-reader",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert results[0]["dataset_reader_type"] == "FakeDatasetReader"
# --use-dataset-reader, override with train
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
"--use-dataset-reader",
"--dataset-reader-choice",
"train",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert results[0]["dataset_reader_type"] == "TextClassificationJsonReader"
# --use-dataset-reader, override with validation
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
"--use-dataset-reader",
"--dataset-reader-choice",
"validation",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert results[0]["dataset_reader_type"] == "FakeDatasetReader"
# No --use-dataset-reader flag, fails because the loading logic
# is not implemented in the testing predictor
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
]
with pytest.raises(NotImplementedError):
main()
def test_base_predictor(self):
# Tests when no Predictor is found and the base class implementation is used
model_path = str(self.classifier_model_path)
archive = load_archive(model_path)
model_type = archive.config.get("model").get("type")
# Makes sure that we don't have a default_predictor for it. Otherwise the base class
# implementation wouldn't be used
from allennlp.models import Model
model_class, _ = Model.resolve_class_name(model_type)
saved_default_predictor = model_class.default_predictor
model_class.default_predictor = None
try:
# Doesn't use a --predictor
sys.argv = [
"__main__.py", # executable
"predict", # command
model_path,
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--silent",
"--use-dataset-reader",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 3
for result in results:
assert set(result.keys()) == {
"logits",
"probs",
"label",
"loss",
"tokens",
"token_ids",
}
finally:
model_class.default_predictor = saved_default_predictor
def test_batch_prediction_works_with_known_model(self):
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--silent",
"--batch-size",
"2",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
for result in results:
assert set(result.keys()) == {"label", "logits", "probs", "tokens", "token_ids"}
shutil.rmtree(self.tempdir)
def test_fails_without_required_args(self):
sys.argv = [
"__main__.py",
"predict",
"/path/to/archive",
] # executable # command # archive, but no input file
with pytest.raises(SystemExit) as cm:
main()
assert cm.value.code == 2 # argparse code for incorrect usage
def test_can_specify_predictor(self):
@Predictor.register("classification-explicit")
class ExplicitPredictor(TextClassifierPredictor):
"""same as classifier predictor but with an extra field"""
def predict_json(self, inputs: JsonDict) -> JsonDict:
result = super().predict_json(inputs)
result["explicit"] = True
return result
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--predictor",
"classification-explicit",
"--silent",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
# Overridden predictor should output extra field
for result in results:
assert set(result.keys()) == {
"label",
"logits",
"explicit",
"probs",
"tokens",
"token_ids",
}
shutil.rmtree(self.tempdir)
def test_other_modules(self):
# Create a new package in a temporary dir
packagedir = self.TEST_DIR / "testpackage"
packagedir.mkdir()
(packagedir / "__init__.py").touch()
# And add that directory to the path
with push_python_path(self.TEST_DIR):
# Write out a duplicate predictor there, but registered under a different name.
from allennlp.predictors import text_classifier
with open(text_classifier.__file__) as f:
code = f.read().replace(
"""@Predictor.register("text_classifier")""",
"""@Predictor.register("duplicate-test-predictor")""",
)
with open(os.path.join(packagedir, "predictor.py"), "w") as f:
f.write(code)
self.infile = os.path.join(self.TEST_DIR, "inputs.txt")
self.outfile = os.path.join(self.TEST_DIR, "outputs.txt")
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--predictor",
"duplicate-test-predictor",
"--silent",
]
# Should raise ConfigurationError, because predictor is unknown
with pytest.raises(ConfigurationError):
main()
# But once we include testpackage, it should be known
sys.argv.extend(["--include-package", "testpackage"])
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
# The duplicate predictor is identical to the stock one, so only the standard fields are output
for result in results:
assert set(result.keys()) == {"label", "logits", "probs", "tokens", "token_ids"}
def test_alternative_file_formats(self):
@Predictor.register("classification-csv")
class CsvPredictor(TextClassifierPredictor):
"""same as classification predictor but using CSV inputs and outputs"""
def load_line(self, line: str) -> JsonDict:
reader = csv.reader([line])
sentence, label = next(reader)
return {"sentence": sentence, "label": label}
def dump_line(self, outputs: JsonDict) -> str:
output = io.StringIO()
writer = csv.writer(output)
row = [outputs["label"], *outputs["probs"]]
writer.writerow(row)
return output.getvalue()
with open(self.infile, "w") as f:
writer = csv.writer(f)
writer.writerow(["the seahawks won the super bowl in 2016", "pos"])
writer.writerow(["the mariners won the super bowl in 2037", "neg"])
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--predictor",
"classification-csv",
"--silent",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile) as f:
reader = csv.reader(f)
results = [row for row in reader]
assert len(results) == 2
for row in results:
assert len(row) == 3 # label and 2 class probabilities
label, *probs = row
for prob in probs:
assert 0 <= float(prob) <= 1
assert label != ""
shutil.rmtree(self.tempdir)
| allennlp-master | tests/commands/predict_test.py |
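The tests above register ad-hoc predictors with @Predictor.register and drive them through the predict command. As a minimal standalone sketch of the same pattern (not part of the test suite; the registered name and the model.tar.gz path are placeholders):

from allennlp.common.util import JsonDict
from allennlp.predictors import Predictor, TextClassifierPredictor


@Predictor.register("explicit-demo")  # hypothetical name, mirroring ExplicitPredictor above
class ExplicitDemoPredictor(TextClassifierPredictor):
    def predict_json(self, inputs: JsonDict) -> JsonDict:
        # Delegate to the stock text-classifier predictor, then add an extra field.
        result = super().predict_json(inputs)
        result["explicit"] = True
        return result


# Load an archived classifier with the custom predictor and run a single input.
predictor = Predictor.from_path("model.tar.gz", predictor_name="explicit-demo")
print(predictor.predict_json({"sentence": "the seahawks won the super bowl in 2016"}))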
import argparse
import os
import pytest
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.data import DataLoader
from allennlp.models import Model
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase, requires_multi_gpu
from allennlp.commands.find_learning_rate import (
search_learning_rate,
find_learning_rate_from_args,
find_learning_rate_model,
FindLearningRate,
)
from allennlp.training import Trainer
from allennlp.training.util import datasets_from_params
def is_matplotlib_installed():
try:
import matplotlib # noqa: F401 - Matplotlib is optional.
except: # noqa: E722. Any exception means we don't have a working matplotlib.
return False
return True
class TestFindLearningRate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"cuda_device": -1, "num_epochs": 2, "optimizer": "adam"},
}
)
@pytest.mark.skipif(not is_matplotlib_installed(), reason="matplotlib dependency is optional")
def test_find_learning_rate(self):
find_learning_rate_model(
self.params(),
os.path.join(self.TEST_DIR, "test_find_learning_rate"),
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
# It's OK if serialization dir exists but is empty:
serialization_dir2 = os.path.join(self.TEST_DIR, "empty_directory")
assert not os.path.exists(serialization_dir2)
os.makedirs(serialization_dir2)
find_learning_rate_model(
self.params(),
serialization_dir2,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
# It's not OK if the serialization dir exists and is non-empty (has junk in it):
serialization_dir3 = os.path.join(self.TEST_DIR, "non_empty_directory")
assert not os.path.exists(serialization_dir3)
os.makedirs(serialization_dir3)
with open(os.path.join(serialization_dir3, "README.md"), "w") as f:
f.write("TEST")
with pytest.raises(ConfigurationError):
find_learning_rate_model(
self.params(),
serialization_dir3,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
# ... unless you use the --force flag.
find_learning_rate_model(
self.params(),
serialization_dir3,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=True,
)
def test_find_learning_rate_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
FindLearningRate().add_subparser(subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = ["find-lr", "path/to/params", serialization_arg, "serialization_dir"]
args = parser.parse_args(raw_args)
assert args.func == find_learning_rate_from_args
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
# config is required
with pytest.raises(SystemExit) as cm:
parser.parse_args(["find-lr", "-s", "serialization_dir"])
assert cm.value.code == 2  # argparse code for incorrect usage
# serialization dir is required
with pytest.raises(SystemExit) as cm:
parser.parse_args(["find-lr", "path/to/params"])
assert cm.value.code == 2  # argparse code for incorrect usage
@requires_multi_gpu
def test_find_learning_rate_multi_gpu(self):
params = self.params()
del params["trainer"]["cuda_device"]
params["distributed"] = Params({})
params["distributed"]["cuda_devices"] = [0, 1]
with pytest.raises(AssertionError) as execinfo:
find_learning_rate_model(
params,
os.path.join(self.TEST_DIR, "test_find_learning_rate_multi_gpu"),
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
assert "DistributedDataParallel" in str(execinfo.value)
class TestSearchLearningRate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"cuda_device": -1, "num_epochs": 2, "optimizer": "adam"},
}
)
all_datasets = datasets_from_params(params)
vocab = Vocabulary.from_params(
params.pop("vocabulary", {}),
instances=(instance for dataset in all_datasets.values() for instance in dataset),
)
model = Model.from_params(vocab=vocab, params=params.pop("model"))
train_data = all_datasets["train"]
train_data.index_with(vocab)
data_loader = DataLoader.from_params(dataset=train_data, params=params.pop("data_loader"))
trainer_params = params.pop("trainer")
serialization_dir = os.path.join(self.TEST_DIR, "test_search_learning_rate")
self.trainer = Trainer.from_params(
model=model,
serialization_dir=serialization_dir,
data_loader=data_loader,
train_data=train_data,
params=trainer_params,
validation_data=None,
validation_iterator=None,
)
def test_search_learning_rate_with_num_batches_less_than_ten(self):
with pytest.raises(ConfigurationError):
search_learning_rate(self.trainer, num_batches=9)
def test_search_learning_rate_linear_steps(self):
learning_rates_losses = search_learning_rate(self.trainer, linear_steps=True)
assert len(learning_rates_losses) > 1
def test_search_learning_rate_without_stopping_factor(self):
learning_rates, losses = search_learning_rate(
self.trainer, num_batches=100, stopping_factor=None
)
assert len(learning_rates) == 101
assert len(losses) == 101
| allennlp-master | tests/commands/find_learning_rate_test.py |
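For reference, a minimal sketch of the programmatic entry point these tests exercise, using the same keyword arguments; the config path and output directory are placeholders:

from allennlp.common import Params
from allennlp.commands.find_learning_rate import find_learning_rate_model

params = Params.from_file("experiment.jsonnet")  # assumed training config
find_learning_rate_model(
    params,
    "lr_search",            # serialization dir: must be new or empty unless force=True
    start_lr=1e-5,
    end_lr=1.0,
    num_batches=100,        # search_learning_rate rejects fewer than 10 batches
    linear_steps=True,
    stopping_factor=None,
    force=False,
)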
import argparse
import copy
import json
import logging
import math
import os
import re
import shutil
from collections import OrderedDict, Counter
from typing import Iterable, Optional, List, Dict, Any
import pytest
import torch
from allennlp.commands.train import Train, train_model, train_model_from_args, TrainModel
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase, cpu_or_gpu
from allennlp.data import DatasetReader, Instance, Vocabulary
from allennlp.data.dataloader import TensorDict
from allennlp.models import load_archive, Model
from allennlp.models.archival import CONFIG_NAME
from allennlp.training import BatchCallback, GradientDescentTrainer
from allennlp.training.learning_rate_schedulers import (
ExponentialLearningRateScheduler,
LearningRateScheduler,
)
SEQUENCE_TAGGING_DATA_PATH = str(AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
SEQUENCE_TAGGING_SHARDS_PATH = str(AllenNlpTestCase.FIXTURES_ROOT / "data" / "shards" / "*")
@BatchCallback.register("training_data_logger")
class TrainingDataLoggerBatchCallback(BatchCallback):
def __call__( # type: ignore
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[TensorDict],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
if is_training:
logger = logging.getLogger(__name__)
for batch in batch_inputs:
for metadata in batch["metadata"]:
logger.info(f"First word from training data: '{metadata['words'][0]}'") # type: ignore
_seen_training_devices = set()
@BatchCallback.register("training_device_logger")
class TrainingDeviceLoggerBatchCallback(BatchCallback):
def __call__( # type: ignore
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[TensorDict],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
global _seen_training_devices
for tensor in trainer.model.parameters():
_seen_training_devices.add(tensor.device)
class TestTrain(AllenNlpTestCase):
DEFAULT_PARAMS = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
def test_train_model(self):
params = lambda: copy.deepcopy(self.DEFAULT_PARAMS)
train_model(params(), serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
# It's OK if serialization dir exists but is empty:
serialization_dir2 = os.path.join(self.TEST_DIR, "empty_directory")
assert not os.path.exists(serialization_dir2)
os.makedirs(serialization_dir2)
train_model(params(), serialization_dir=serialization_dir2)
# It's not OK if the serialization dir exists and is non-empty (has junk in it):
serialization_dir3 = os.path.join(self.TEST_DIR, "non_empty_directory")
assert not os.path.exists(serialization_dir3)
os.makedirs(serialization_dir3)
with open(os.path.join(serialization_dir3, "README.md"), "w") as f:
f.write("TEST")
with pytest.raises(ConfigurationError):
train_model(params(), serialization_dir=serialization_dir3)
# It's also not OK if serialization dir is a real serialization dir:
with pytest.raises(ConfigurationError):
train_model(params(), serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
# But it's OK if serialization dir exists and --recover is specified:
train_model(
params(),
serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"),
recover=True,
)
# It's OK if the serialization dir exists when --force is specified (it will be deleted):
train_model(
params(), serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"), force=True
)
# But --force and --recover cannot both be specified
with pytest.raises(ConfigurationError):
train_model(
params(),
serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"),
force=True,
recover=True,
)
@cpu_or_gpu
def test_detect_gpu(self):
import copy
params = copy.deepcopy(self.DEFAULT_PARAMS)
params["trainer"]["batch_callbacks"] = ["training_device_logger"]
global _seen_training_devices
_seen_training_devices.clear()
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_detect_gpu"))
assert len(_seen_training_devices) == 1
seen_training_device = next(iter(_seen_training_devices))
if torch.cuda.device_count() == 0:
assert seen_training_device.type == "cpu"
else:
assert seen_training_device.type == "cuda"
@cpu_or_gpu
def test_force_gpu(self):
import copy
params = copy.deepcopy(self.DEFAULT_PARAMS)
params["trainer"]["batch_callbacks"] = ["training_device_logger"]
params["trainer"]["cuda_device"] = 0
global _seen_training_devices
_seen_training_devices.clear()
if torch.cuda.device_count() == 0:
with pytest.raises(ConfigurationError):
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_force_gpu"))
else:
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_force_gpu"))
assert len(_seen_training_devices) == 1
seen_training_device = next(iter(_seen_training_devices))
assert seen_training_device.type == "cuda"
@cpu_or_gpu
def test_force_cpu(self):
import copy
params = copy.deepcopy(self.DEFAULT_PARAMS)
params["trainer"]["batch_callbacks"] = ["training_device_logger"]
params["trainer"]["cuda_device"] = -1
global _seen_training_devices
_seen_training_devices.clear()
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_force_cpu"))
assert len(_seen_training_devices) == 1
seen_training_device = next(iter(_seen_training_devices))
assert seen_training_device.type == "cpu"
@cpu_or_gpu
def test_train_model_distributed(self):
if torch.cuda.device_count() >= 2:
devices = [0, 1]
else:
devices = [-1, -1]
params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
"distributed": {"cuda_devices": devices},
}
)
out_dir = os.path.join(self.TEST_DIR, "test_distributed_train")
train_model(params(), serialization_dir=out_dir)
# Check that some logs specific to distributed
# training are where we expect.
serialized_files = os.listdir(out_dir)
assert "out_worker0.log" in serialized_files
assert "out_worker1.log" in serialized_files
assert "model.tar.gz" in serialized_files
assert "metrics.json" in serialized_files
# Make sure the metrics look right.
with open(os.path.join(out_dir, "metrics.json")) as f:
metrics = json.load(f)
assert metrics["peak_worker_0_memory_MB"] > 0
assert metrics["peak_worker_1_memory_MB"] > 0
if torch.cuda.device_count() >= 2:
assert metrics["peak_gpu_0_memory_MB"] > 0
assert metrics["peak_gpu_1_memory_MB"] > 0
# Check we can load the serialized model
assert load_archive(out_dir).model
@cpu_or_gpu
@pytest.mark.parametrize("lazy", [True, False])
def test_train_model_distributed_with_sharded_reader(self, lazy):
if torch.cuda.device_count() >= 2:
devices = [0, 1]
else:
devices = [-1, -1]
params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {
"type": "sharded",
"base_reader": {"type": "sequence_tagging"},
"lazy": lazy,
},
"train_data_path": SEQUENCE_TAGGING_SHARDS_PATH,
"validation_data_path": SEQUENCE_TAGGING_SHARDS_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
"distributed": {"cuda_devices": devices},
}
)
out_dir = os.path.join(self.TEST_DIR, "test_distributed_train")
train_model(params(), serialization_dir=out_dir)
# Check that some logs specific to distributed
# training are where we expect.
serialized_files = os.listdir(out_dir)
assert "out_worker0.log" in serialized_files
assert "out_worker1.log" in serialized_files
assert "model.tar.gz" in serialized_files
# Check we can load the serialized model
archive = load_archive(out_dir)
assert archive.model
# Check that we created a vocab from all the shards.
tokens = archive.model.vocab._token_to_index["tokens"].keys()
assert tokens == {
"@@PADDING@@",
"@@UNKNOWN@@",
"are",
".",
"animals",
"plants",
"vehicles",
"cats",
"dogs",
"snakes",
"birds",
"ferns",
"trees",
"flowers",
"vegetables",
"cars",
"buses",
"planes",
"rockets",
}
# TODO: This is somewhat brittle. Make these constants in trainer.py.
train_early = "finishing training early!"
validation_early = "finishing validation early!"
train_complete = "completed its entire epoch (training)."
validation_complete = "completed its entire epoch (validation)."
# There are three shards, but only two workers, so the first worker will have to discard some data.
with open(os.path.join(out_dir, "out_worker0.log")) as f:
worker0_log = f.read()
assert train_early in worker0_log
assert validation_early in worker0_log
assert train_complete not in worker0_log
assert validation_complete not in worker0_log
with open(os.path.join(out_dir, "out_worker1.log")) as f:
worker1_log = f.read()
assert train_early not in worker1_log
assert validation_early not in worker1_log
assert train_complete in worker1_log
assert validation_complete in worker1_log
@cpu_or_gpu
@pytest.mark.parametrize("lazy", [True, False])
def test_train_model_distributed_without_sharded_reader(self, lazy: bool):
if torch.cuda.device_count() >= 2:
devices = [0, 1]
else:
devices = [-1, -1]
num_epochs = 2
params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging", "lazy": lazy},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 1},
"trainer": {
"num_epochs": num_epochs,
"optimizer": "adam",
"batch_callbacks": [
"tests.commands.train_test.TrainingDataLoggerBatchCallback"
],
},
"distributed": {"cuda_devices": devices},
}
)
out_dir = os.path.join(self.TEST_DIR, "test_distributed_train")
train_model(params(), serialization_dir=out_dir)
# Check that some logs specific to distributed
# training are where we expect.
serialized_files = os.listdir(out_dir)
assert "out_worker0.log" in serialized_files
assert "out_worker1.log" in serialized_files
assert "model.tar.gz" in serialized_files
# Check we can load the serialized model
archive = load_archive(out_dir)
assert archive.model
# Check that we created a vocab from all of the training data.
tokens = set(archive.model.vocab._token_to_index["tokens"].keys())
assert tokens == {
"@@PADDING@@",
"@@UNKNOWN@@",
"are",
".",
"animals",
"cats",
"dogs",
"snakes",
"birds",
}
train_complete = "completed its entire epoch (training)."
validation_complete = "completed its entire epoch (validation)."
import re
pattern = re.compile(r"First word from training data: '([^']*)'")
first_word_counts = Counter() # type: ignore
with open(os.path.join(out_dir, "out_worker0.log")) as f:
worker0_log = f.read()
assert train_complete in worker0_log
assert validation_complete in worker0_log
for first_word in pattern.findall(worker0_log):
first_word_counts[first_word] += 1
with open(os.path.join(out_dir, "out_worker1.log")) as f:
worker1_log = f.read()
assert train_complete in worker1_log
assert validation_complete in worker1_log
for first_word in pattern.findall(worker1_log):
first_word_counts[first_word] += 1
assert first_word_counts == {
"cats": num_epochs,
"dogs": num_epochs,
"snakes": num_epochs,
"birds": num_epochs,
}
def test_distributed_raises_error_with_no_gpus(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
"distributed": {},
}
)
with pytest.raises(ConfigurationError):
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
def test_train_saves_all_keys_in_config(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"pytorch_seed": 42,
"numpy_seed": 42,
"random_seed": 42,
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
serialization_dir = os.path.join(self.TEST_DIR, "test_train_model")
params_as_dict = (
params.as_ordered_dict()
) # Do it here as train_model will pop all the values.
train_model(params, serialization_dir=serialization_dir)
config_path = os.path.join(serialization_dir, CONFIG_NAME)
with open(config_path) as config:
saved_config_as_dict = OrderedDict(json.load(config))
assert params_as_dict == saved_config_as_dict
def test_error_is_thrown_when_cuda_device_is_not_available(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": "test_fixtures/data/sequence_tagging.tsv",
"validation_data_path": "test_fixtures/data/sequence_tagging.tsv",
"data_loader": {"batch_size": 2},
"trainer": {
"num_epochs": 2,
"cuda_device": torch.cuda.device_count(),
"optimizer": "adam",
},
}
)
with pytest.raises(ConfigurationError, match="Experiment specified"):
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
def test_train_with_test_set(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"test_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"evaluate_on_test": True,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "train_with_test_set"))
def test_train_number_of_steps(self):
number_of_epochs = 2
last_num_steps_per_epoch: Optional[int] = None
@LearningRateScheduler.register("mock")
class MockLRScheduler(ExponentialLearningRateScheduler):
def __init__(self, optimizer: torch.optim.Optimizer, num_steps_per_epoch: int):
super().__init__(optimizer)
nonlocal last_num_steps_per_epoch
last_num_steps_per_epoch = num_steps_per_epoch
batch_callback_counter = 0
@BatchCallback.register("counter")
class CounterBatchCallback(BatchCallback):
def __call__(
self,
trainer: GradientDescentTrainer,
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
nonlocal batch_callback_counter
if is_training:
batch_callback_counter += 1
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"test_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"evaluate_on_test": True,
"data_loader": {"batch_size": 2},
"trainer": {
"num_epochs": number_of_epochs,
"optimizer": "adam",
"learning_rate_scheduler": {"type": "mock"},
"batch_callbacks": ["counter"],
},
}
)
train_model(
params.duplicate(), serialization_dir=os.path.join(self.TEST_DIR, "train_normal")
)
assert batch_callback_counter == last_num_steps_per_epoch * number_of_epochs
batch_callback_counter = 0
normal_steps_per_epoch = last_num_steps_per_epoch
original_batch_size = params["data_loader"]["batch_size"]
params["data_loader"]["batch_size"] = 1
train_model(
params.duplicate(), serialization_dir=os.path.join(self.TEST_DIR, "train_with_bs1")
)
assert batch_callback_counter == last_num_steps_per_epoch * number_of_epochs
batch_callback_counter = 0
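# With batch_size=1, last_num_steps_per_epoch equals the number of training instances,
# so the earlier run with the original batch size must have taken ceil(instances / batch_size) steps.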
assert normal_steps_per_epoch == math.ceil(last_num_steps_per_epoch / original_batch_size)
params["data_loader"]["batch_size"] = original_batch_size
params["trainer"]["num_gradient_accumulation_steps"] = 3
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "train_with_ga"))
assert batch_callback_counter == last_num_steps_per_epoch * number_of_epochs
batch_callback_counter = 0
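# With num_gradient_accumulation_steps=3, three batches make up one optimizer step,
# so the scheduler sees roughly a third as many steps per epoch.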
assert math.ceil(normal_steps_per_epoch / 3) == last_num_steps_per_epoch
def test_train_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
Train().add_subparser(subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = ["train", "path/to/params", serialization_arg, "serialization_dir"]
args = parser.parse_args(raw_args)
assert args.func == train_model_from_args
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
# config is required
with pytest.raises(SystemExit) as cm:
args = parser.parse_args(["train", "-s", "serialization_dir"])
assert cm.value.code == 2  # argparse code for incorrect usage
# serialization dir is required
with pytest.raises(SystemExit) as cm:
args = parser.parse_args(["train", "path/to/params"])
assert cm.value.code == 2  # argparse code for incorrect usage
def test_train_model_can_instantiate_from_params(self):
params = Params.from_file(self.FIXTURES_ROOT / "simple_tagger" / "experiment.json")
# Can instantiate from base class params
TrainModel.from_params(
params=params, serialization_dir=self.TEST_DIR, local_rank=0, batch_weight_key=""
)
def test_train_can_fine_tune_model_from_archive(self):
params = Params.from_file(
self.FIXTURES_ROOT / "basic_classifier" / "experiment_from_archive.jsonnet"
)
train_loop = TrainModel.from_params(
params=params, serialization_dir=self.TEST_DIR, local_rank=0, batch_weight_key=""
)
train_loop.run()
model = Model.from_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
# This is checking that the vocabulary actually got extended. The data that we're using for
# training is different from the data we used to produce the model archive, and we set
# parameters such that the vocab should have been extended.
assert train_loop.model.vocab.get_vocab_size() > model.vocab.get_vocab_size()
@DatasetReader.register("lazy-test")
class LazyFakeReader(DatasetReader):
def __init__(self) -> None:
super().__init__(lazy=True)
self.reader = DatasetReader.from_params(Params({"type": "sequence_tagging", "lazy": True}))
def _read(self, file_path: str) -> Iterable[Instance]:
"""
Reads some data from the `file_path` and returns the instances.
"""
return self.reader.read(file_path)
class TestTrainOnLazyDataset(AllenNlpTestCase):
def test_train_model(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "lazy-test"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "train_lazy_model"))
def test_train_with_test_set(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "lazy-test"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"test_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"evaluate_on_test": True,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "lazy_test_set"))
def test_train_nograd_regex(self):
params_get = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
serialization_dir = os.path.join(self.TEST_DIR, "test_train_nograd")
regex_lists = [[], [".*text_field_embedder.*"], [".*text_field_embedder.*", ".*encoder.*"]]
for regex_list in regex_lists:
params = params_get()
params["trainer"]["no_grad"] = regex_list
shutil.rmtree(serialization_dir, ignore_errors=True)
model = train_model(params, serialization_dir=serialization_dir)
# If a regex matches a parameter's name, that parameter should have requires_grad=False;
# otherwise it should remain True.
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in regex_list):
assert not parameter.requires_grad
else:
assert parameter.requires_grad
# If all parameters have requires_grad=False, then error.
params = params_get()
params["trainer"]["no_grad"] = ["*"]
shutil.rmtree(serialization_dir, ignore_errors=True)
with pytest.raises(Exception):
train_model(params, serialization_dir=serialization_dir)
class TestDryRun(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
def test_dry_run_doesnt_overwrite_vocab(self):
vocab_path = self.TEST_DIR / "vocabulary"
os.mkdir(vocab_path)
# Put something in the vocab directory
with open(vocab_path / "test.txt", "a+") as open_file:
open_file.write("test")
# It should raise an error if the vocab dir is non-empty
with pytest.raises(ConfigurationError):
train_model(self.params, self.TEST_DIR, dry_run=True)
def test_dry_run_makes_vocab(self):
vocab_path = self.TEST_DIR / "vocabulary"
train_model(self.params, self.TEST_DIR, dry_run=True)
vocab_files = os.listdir(vocab_path)
assert set(vocab_files) == {
".lock",
"labels.txt",
"non_padded_namespaces.txt",
"tokens.txt",
}
with open(vocab_path / "tokens.txt") as f:
tokens = [line.strip() for line in f]
tokens.sort()
assert tokens == [".", "@@UNKNOWN@@", "animals", "are", "birds", "cats", "dogs", "snakes"]
with open(vocab_path / "labels.txt") as f:
labels = [line.strip() for line in f]
labels.sort()
assert labels == ["N", "V"]
def test_dry_run_with_extension(self):
existing_serialization_dir = self.TEST_DIR / "existing"
extended_serialization_dir = self.TEST_DIR / "extended"
existing_vocab_path = existing_serialization_dir / "vocabulary"
extended_vocab_path = extended_serialization_dir / "vocabulary"
vocab = Vocabulary()
vocab.add_token_to_namespace("some_weird_token_1", namespace="tokens")
vocab.add_token_to_namespace("some_weird_token_2", namespace="tokens")
os.makedirs(existing_serialization_dir, exist_ok=True)
vocab.save_to_files(existing_vocab_path)
self.params["vocabulary"] = {}
self.params["vocabulary"]["type"] = "extend"
self.params["vocabulary"]["directory"] = str(existing_vocab_path)
self.params["vocabulary"]["min_count"] = {"tokens": 3}
train_model(self.params, extended_serialization_dir, dry_run=True)
vocab_files = os.listdir(extended_vocab_path)
assert set(vocab_files) == {
".lock",
"labels.txt",
"non_padded_namespaces.txt",
"tokens.txt",
}
with open(extended_vocab_path / "tokens.txt") as f:
tokens = [line.strip() for line in f]
assert tokens[0] == "@@UNKNOWN@@"
assert tokens[1] == "some_weird_token_1"
assert tokens[2] == "some_weird_token_2"
tokens.sort()
assert tokens == [
".",
"@@UNKNOWN@@",
"animals",
"are",
"some_weird_token_1",
"some_weird_token_2",
]
with open(extended_vocab_path / "labels.txt") as f:
labels = [line.strip() for line in f]
labels.sort()
assert labels == ["N", "V"]
def test_dry_run_without_extension(self):
existing_serialization_dir = self.TEST_DIR / "existing"
extended_serialization_dir = self.TEST_DIR / "extended"
existing_vocab_path = existing_serialization_dir / "vocabulary"
extended_vocab_path = extended_serialization_dir / "vocabulary"
vocab = Vocabulary()
# If extend is False, it is the user's responsibility to make sure that dataset instances
# will be indexable by the provided vocabulary. At least @@UNKNOWN@@ should be present in
# any namespace for which there could be OOV entries seen in the dataset during indexing.
# For the `tokens` namespace, new words will be seen, but `tokens` has an @@UNKNOWN@@ token;
# for the 'labels' namespace there is no @@UNKNOWN@@, so 'N' and 'V' must be added upfront.
vocab.add_token_to_namespace("some_weird_token_1", namespace="tokens")
vocab.add_token_to_namespace("some_weird_token_2", namespace="tokens")
vocab.add_token_to_namespace("N", namespace="labels")
vocab.add_token_to_namespace("V", namespace="labels")
os.makedirs(existing_serialization_dir, exist_ok=True)
vocab.save_to_files(existing_vocab_path)
self.params["vocabulary"] = {}
self.params["vocabulary"]["type"] = "from_files"
self.params["vocabulary"]["directory"] = str(existing_vocab_path)
train_model(self.params, extended_serialization_dir, dry_run=True)
with open(extended_vocab_path / "tokens.txt") as f:
tokens = [line.strip() for line in f]
assert tokens[0] == "@@UNKNOWN@@"
assert tokens[1] == "some_weird_token_1"
assert tokens[2] == "some_weird_token_2"
assert len(tokens) == 3
def test_make_vocab_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
Train().add_subparser(subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = [
"train",
"path/to/params",
serialization_arg,
"serialization_dir",
"--dry-run",
]
args = parser.parse_args(raw_args)
assert args.func == train_model_from_args
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
assert args.dry_run
def test_warn_validation_loader_batches_per_epoch(self):
self.params["data_loader"]["batches_per_epoch"] = 3
with pytest.warns(UserWarning, match="batches_per_epoch"):
train_model(self.params, self.TEST_DIR, dry_run=True)
| allennlp-master | tests/commands/train_test.py |
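A minimal sketch of the programmatic flow these tests rely on: load a config, train into a fresh serialization directory, then reload the resulting archive (file paths are placeholders):

from allennlp.commands.train import train_model
from allennlp.common import Params
from allennlp.models import load_archive

params = Params.from_file("experiment.json")              # assumed config file
model = train_model(params, serialization_dir="output")   # dir must be new/empty unless recover or force is set
archive = load_archive("output/model.tar.gz")             # training writes model.tar.gz into the serialization dir
assert archive.model is not None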
import os
import sys
import pytest
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
class TestBuildVocabCommand(AllenNlpTestCase):
def test_build_vocab(self):
output_path = self.TEST_DIR / "vocab.tar.gz"
sys.argv = [
"allennlp",
"build-vocab",
str(self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet"),
str(output_path),
]
main()
assert os.path.exists(output_path)
vocab = Vocabulary.from_files(output_path)
assert vocab.get_token_index("neg", "labels") == 0
# If we try again, this time we should get a RuntimeError because the vocab archive
# already exists at the output path.
with pytest.raises(RuntimeError, match="already exists"):
main()
# But now if we add the '--force' argument, it will overwrite the file.
sys.argv.append("--force")
main()
| allennlp-master | tests/commands/build_vocab_test.py |
import argparse
import json
from typing import Iterator, List, Dict
import torch
from flaky import flaky
import pytest
from allennlp.commands.evaluate import evaluate_from_args, Evaluate, evaluate
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataloader import TensorDict
from allennlp.models import Model
class DummyDataLoader:
def __init__(self, outputs: List[TensorDict]) -> None:
super().__init__()
self._outputs = outputs
def __iter__(self) -> Iterator[TensorDict]:
yield from self._outputs
def __len__(self):
return len(self._outputs)
class DummyModel(Model):
def __init__(self) -> None:
super().__init__(None) # type: ignore
def forward(self, **kwargs) -> Dict[str, torch.Tensor]: # type: ignore
return kwargs
class TestEvaluate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.parser = argparse.ArgumentParser(description="Testing")
subparsers = self.parser.add_subparsers(title="Commands", metavar="")
Evaluate().add_subparser(subparsers)
def test_evaluate_calculates_average_loss(self):
losses = [7.0, 9.0, 8.0]
outputs = [{"loss": torch.Tensor([loss])} for loss in losses]
data_loader = DummyDataLoader(outputs)
metrics = evaluate(DummyModel(), data_loader, -1, "")
assert metrics["loss"] == pytest.approx(8.0)
def test_evaluate_calculates_average_loss_with_weights(self):
losses = [7.0, 9.0, 8.0]
weights = [10, 2, 1.5]
inputs = zip(losses, weights)
outputs = [
{"loss": torch.Tensor([loss]), "batch_weight": torch.Tensor([weight])}
for loss, weight in inputs
]
data_loader = DummyDataLoader(outputs)
metrics = evaluate(DummyModel(), data_loader, -1, "batch_weight")
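# Weighted mean: (7*10 + 9*2 + 8*1.5) / (10 + 2 + 1.5) = (70 + 18 + 12) / 13.5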
assert metrics["loss"] == pytest.approx((70 + 18 + 12) / 13.5)
@flaky
def test_evaluate_from_args(self):
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
str(self.FIXTURES_ROOT / "data" / "conll2003.txt"),
"--cuda-device",
"-1",
]
args = self.parser.parse_args(kebab_args)
metrics = evaluate_from_args(args)
assert metrics.keys() == {
"accuracy",
"accuracy3",
"precision-overall",
"recall-overall",
"f1-measure-overall",
"loss",
}
def test_output_file_evaluate_from_args(self):
output_file = str(self.TEST_DIR / "metrics.json")
predictions_output_file = str(self.TEST_DIR / "predictions.jsonl")
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
str(self.FIXTURES_ROOT / "data" / "conll2003.txt"),
"--cuda-device",
"-1",
"--output-file",
output_file,
"--predictions-output-file",
predictions_output_file,
]
args = self.parser.parse_args(kebab_args)
computed_metrics = evaluate_from_args(args)
with open(output_file, "r") as file:
saved_metrics = json.load(file)
assert computed_metrics == saved_metrics
with open(predictions_output_file, "r") as file:
for line in file:
prediction = json.loads(line.strip())
assert "tags" in prediction
def test_evaluate_works_with_vocab_expansion(self):
archive_path = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
# imdb_corpus2 has an extra token ("seahorse") in it.
evaluate_data_path = str(
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus2.jsonl"
)
embeddings_filename = str(
self.FIXTURES_ROOT / "data" / "unawarded_embeddings.gz"
) # has only unawarded vector
embedding_sources_mapping = json.dumps(
{"_text_field_embedder.token_embedder_tokens": embeddings_filename}
)
kebab_args = ["evaluate", archive_path, evaluate_data_path, "--cuda-device", "-1"]
# TODO(mattg): the unawarded_embeddings.gz file above doesn't exist, but this test still
# passes. This suggests that vocab extension in evaluate isn't currently doing anything,
# and so it is broken.
# Evaluate 1 with no vocab expansion,
# Evaluate 2 with vocab expansion with no pretrained embedding file.
# Evaluate 3 with vocab expansion with given pretrained embedding file.
metrics_1 = evaluate_from_args(self.parser.parse_args(kebab_args))
metrics_2 = evaluate_from_args(self.parser.parse_args(kebab_args + ["--extend-vocab"]))
metrics_3 = evaluate_from_args(
self.parser.parse_args(
kebab_args + ["--embedding-sources-mapping", embedding_sources_mapping]
)
)
assert metrics_1 != metrics_2
assert metrics_2 != metrics_3
| allennlp-master | tests/commands/evaluate_test.py |
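As a small illustration of what evaluate does with the dummies defined above (a sketch, not part of the test file): it iterates the data loader, runs the model on each batch, and averages the "loss" entries across batches.

import torch
from allennlp.commands.evaluate import evaluate

# Assumes DummyModel and DummyDataLoader from the test module above are importable.
outputs = [{"loss": torch.Tensor([2.0])}, {"loss": torch.Tensor([4.0])}]
metrics = evaluate(DummyModel(), DummyDataLoader(outputs), -1, "")
assert abs(metrics["loss"] - 3.0) < 1e-6  # mean of 2.0 and 4.0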
| allennlp-master | tests/commands/__init__.py |
from typing import Dict
import torch
from allennlp.commands.train import train_model
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models import load_archive, Model
SEQUENCE_TAGGING_DATA_PATH = str(AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
@Model.register("constant")
class ConstantModel(Model):
def forward(self, *inputs) -> Dict[str, torch.Tensor]:
return {"class": torch.tensor(98)}
class TestTrain(AllenNlpTestCase):
def test_train_model(self):
params = lambda: Params(
{
"model": {"type": "constant"},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"type": "no_op"},
}
)
serialization_dir = self.TEST_DIR / "serialization_directory"
train_model(params(), serialization_dir=serialization_dir)
archive = load_archive(str(serialization_dir / "model.tar.gz"))
model = archive.model
assert model.forward(torch.tensor([1, 2, 3]))["class"] == torch.tensor(98)
assert model.vocab.get_vocab_size() == 9
| allennlp-master | tests/commands/no_op_train_test.py |
import os
from allennlp.common.testing import AllenNlpTestCase
from allennlp.commands.test_install import _get_module_root
class TestTestInstall(AllenNlpTestCase):
def test_get_module_root(self):
"""
When a user runs `allennlp test-install`, we have no idea where
they're running it from, so we do an `os.chdir` to the _module_
root in order to get all the paths in the fixtures to resolve properly.
The logic within `allennlp test-install` is pretty hard to test in
its entirety, so this test just verifies that the `os.chdir` component
works properly by checking that we correctly find the path to
`os.chdir` to.
"""
project_root = _get_module_root()
assert os.path.exists(os.path.join(project_root, "__main__.py"))
| allennlp-master | tests/commands/test_install_test.py |
import sys
import pytest
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
class TestCachedPathCommand(AllenNlpTestCase):
def test_local_file(self, capsys):
sys.argv = ["allennlp", "cached-path", "--cache-dir", str(self.TEST_DIR), "README.md"]
main()
captured = capsys.readouterr()
assert "README.md" in captured.out
def test_inspect_empty_cache(self, capsys):
sys.argv = ["allennlp", "cached-path", "--cache-dir", str(self.TEST_DIR), "--inspect"]
main()
captured = capsys.readouterr()
assert "Cached resources:" in captured.out
assert "Total size: 0B" in captured.out
def test_inspect_with_bad_options(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--inspect",
"--extract-archive",
]
with pytest.raises(RuntimeError, match="--extract-archive"):
main()
def test_remove_with_bad_options(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--remove",
"--extract-archive",
"*",
]
with pytest.raises(RuntimeError, match="--extract-archive"):
main()
def test_remove_with_missing_positionals(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--remove",
]
with pytest.raises(RuntimeError, match="Missing positional"):
main()
def test_remove_empty_cache(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--remove",
"*",
]
main()
captured = capsys.readouterr()
assert "Reclaimed 0B of space" in captured.out
| allennlp-master | tests/commands/cached_path_test.py |
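The command tested above exposes allennlp.common.file_utils.cached_path on the command line; a sketch of the direct library call (the URL and cache directory are placeholders):

from allennlp.common.file_utils import cached_path

# Downloads the resource into the cache (or reuses an existing entry) and
# returns the local filesystem path.
local_path = cached_path(
    "https://example.com/resource.tar.gz",  # assumed URL
    cache_dir="/tmp/allennlp_cache",
)
print(local_path)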
import shutil
import sys
import pytest
from overrides import overrides
from allennlp.commands import main
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import ConfigurationError
from allennlp.common.plugins import discover_plugins
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import push_python_path, pushd
class TestMain(AllenNlpTestCase):
def test_fails_on_unknown_command(self):
sys.argv = [
"bogus", # command
"unknown_model", # model_name
"bogus file", # input_file
"--output-file",
"bogus out file",
"--silent",
]
with pytest.raises(SystemExit) as cm:
main()
assert cm.value.code == 2 # argparse code for incorrect usage
def test_subcommand_overrides(self):
called = False
def do_nothing(_):
nonlocal called
called = True
@Subcommand.register("evaluate", exist_ok=True)
class FakeEvaluate(Subcommand): # noqa
@overrides
def add_subparser(self, parser):
subparser = parser.add_parser(self.name, description="fake", help="fake help")
subparser.set_defaults(func=do_nothing)
return subparser
sys.argv = ["allennlp", "evaluate"]
main()
assert called
def test_other_modules(self):
# Create a new package in a temporary dir
packagedir = self.TEST_DIR / "testpackage"
packagedir.mkdir()
(packagedir / "__init__.py").touch()
# And add that directory to the path
with push_python_path(self.TEST_DIR):
# Write out a duplicate model there, but registered under a different name.
from allennlp.models import simple_tagger
with open(simple_tagger.__file__) as model_file:
code = model_file.read().replace(
"""@Model.register("simple_tagger")""",
"""@Model.register("duplicate-test-tagger")""",
)
with open(packagedir / "model.py", "w") as new_model_file:
new_model_file.write(code)
# Copy fixture there too.
shutil.copy(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv", self.TEST_DIR)
data_path = str(self.TEST_DIR / "sequence_tagging.tsv")
# Write out config file
config_path = self.TEST_DIR / "config.json"
config_json = """{
"model": {
"type": "duplicate-test-tagger",
"text_field_embedder": {
"token_embedders": {
"tokens": {
"type": "embedding",
"embedding_dim": 5
}
}
},
"encoder": {
"type": "lstm",
"input_size": 5,
"hidden_size": 7,
"num_layers": 2
}
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": "$$$",
"validation_data_path": "$$$",
"data_loader": {"batch_size": 2},
"trainer": {
"num_epochs": 2,
"optimizer": "adam"
}
}""".replace(
"$$$", data_path
)
with open(config_path, "w") as config_file:
config_file.write(config_json)
serialization_dir = self.TEST_DIR / "serialization"
# Run train with using the non-allennlp module.
sys.argv = ["allennlp", "train", str(config_path), "-s", str(serialization_dir)]
# Shouldn't be able to find the model.
with pytest.raises(ConfigurationError):
main()
# Now add the --include-package flag and it should work.
# We also need to add --recover since the output directory already exists.
sys.argv.extend(["--recover", "--include-package", "testpackage"])
main()
# Rewrite out config file, but change a value.
with open(config_path, "w") as new_config_file:
new_config_file.write(config_json.replace('"num_epochs": 2,', '"num_epochs": 4,'))
# This should fail because the config.json does not match that in the serialization directory.
with pytest.raises(ConfigurationError):
main()
def test_file_plugin_loaded(self):
plugins_root = self.FIXTURES_ROOT / "plugins"
sys.argv = ["allennlp"]
available_plugins = set(discover_plugins())
assert available_plugins == set()
with pushd(plugins_root):
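            # Running from inside the fixture directory lets plugin discovery pick up
            # the local plugins file stored there, which registers the extra "d"
            # subcommand checked below.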
main()
subcommands_available = Subcommand.list_available()
assert "d" in subcommands_available
| allennlp-master | tests/commands/main_test.py |
import os
import json
import sys
import pathlib
import tempfile
import io
from contextlib import redirect_stdout
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
class TestPrintResults(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.out_dir1 = pathlib.Path(tempfile.mkdtemp(prefix="hi"))
self.out_dir2 = pathlib.Path(tempfile.mkdtemp(prefix="hi"))
self.directory1 = self.TEST_DIR / "results1"
self.directory2 = self.TEST_DIR / "results2"
self.directory3 = self.TEST_DIR / "results3"
os.makedirs(self.directory1)
os.makedirs(self.directory2)
os.makedirs(self.directory3)
        with open(self.directory1 / "metrics.json", "w+") as f:
            json.dump({"train": 1, "test": 2, "dev": 3}, f)
        with open(self.directory2 / "metrics.json", "w+") as f:
            json.dump({"train": 4, "dev": 5}, f)
        with open(self.directory3 / "cool_metrics.json", "w+") as f:
            json.dump({"train": 6, "dev": 7}, f)
def test_print_results(self):
kebab_args = [
"__main__.py",
"print-results",
str(self.TEST_DIR),
"--keys",
"train",
"dev",
"test",
]
sys.argv = kebab_args
with io.StringIO() as buf, redirect_stdout(buf):
main()
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[0] == "model_run, train, dev, test"
expected_results = {
(str(self.directory1) + "/metrics.json", "1", "3", "2"),
(str(self.directory2) + "/metrics.json", "4", "5", "N/A"),
}
results = {tuple(line.split(", ")) for line in lines[1:]}
assert results == expected_results
def test_print_results_with_metrics_filename(self):
kebab_args = [
"__main__.py",
"print-results",
str(self.TEST_DIR),
"--keys",
"train",
"dev",
"test",
"--metrics-filename",
"cool_metrics.json",
]
sys.argv = kebab_args
with io.StringIO() as buf, redirect_stdout(buf):
main()
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[0] == "model_run, train, dev, test"
expected_results = {(str(self.directory3) + "/cool_metrics.json", "6", "7", "N/A")}
results = {tuple(line.split(", ")) for line in lines[1:]}
assert results == expected_results
| allennlp-master | tests/commands/print_results_test.py |
from numpy.testing import assert_almost_equal
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import Maxout
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.common.testing import AllenNlpTestCase
class TestMaxout(AllenNlpTestCase):
def test_init_checks_output_dims_consistency(self):
with pytest.raises(ConfigurationError):
Maxout(input_dim=2, num_layers=2, output_dims=[5, 4, 3], pool_sizes=4, dropout=0.0)
def test_init_checks_pool_sizes_consistency(self):
with pytest.raises(ConfigurationError):
Maxout(input_dim=2, num_layers=2, output_dims=5, pool_sizes=[4, 5, 2], dropout=0.0)
def test_init_checks_dropout_consistency(self):
with pytest.raises(ConfigurationError):
Maxout(input_dim=2, num_layers=3, output_dims=5, pool_sizes=4, dropout=[0.2, 0.3])
def test_forward_gives_correct_output(self):
params = Params(
{"input_dim": 2, "output_dims": 3, "pool_sizes": 4, "dropout": 0.0, "num_layers": 2}
)
maxout = Maxout.from_params(params)
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(maxout)
input_tensor = torch.FloatTensor([[-3, 1]])
output = maxout(input_tensor).data.numpy()
assert output.shape == (1, 3)
# This output was checked by hand
# The output of the first maxout layer is [-1, -1, -1], since the
# matrix multiply gives us [-2]*12. Reshaping and maxing
# produces [-2, -2, -2] and the bias increments these values.
        # The second layer output is [-2, -2, -2], since the matrix
        # multiply gives us [-3]*12. Reshaping and maxing
# produces [-3, -3, -3] and the bias increments these values.
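        # Concretely, with every weight and bias set to 1: the first Linear(2 -> 12)
        # maps [-3, 1] to (-3 + 1) + 1 = -1 in each coordinate, and maxing over each
        # pool of 4 gives [-1, -1, -1]; the second Linear(3 -> 12) then gives
        # (-1 * 3) + 1 = -2 per coordinate, so the final pooled output is [-2, -2, -2].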
assert_almost_equal(output, [[-2, -2, -2]])
| allennlp-master | tests/modules/maxout_test.py |
import json
import os
import warnings
from typing import List
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from allennlp.data.token_indexers.single_id_token_indexer import SingleIdTokenIndexer
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.modules.elmo import _ElmoBiLm, _ElmoCharacterEncoder, Elmo
from allennlp.modules.token_embedders import ElmoTokenEmbedder
from allennlp.nn.util import remove_sentence_boundaries
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
class ElmoTestCase(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
self.options_file = str(self.elmo_fixtures_path / "options.json")
self.weight_file = str(self.elmo_fixtures_path / "lm_weights.hdf5")
self.sentences_json_file = str(self.elmo_fixtures_path / "sentences.json")
self.sentences_txt_file = str(self.elmo_fixtures_path / "sentences.txt")
def _load_sentences_embeddings(self):
"""
Load the test sentences and the expected LM embeddings.
These files loaded in this method were created with a batch-size of 3.
Due to idiosyncrasies with TensorFlow, the 30 sentences in sentences.json are split into 3 files in which
the k-th sentence in each is from batch k.
This method returns a (sentences, embeddings) pair where each is a list of length batch_size.
Each list contains a sublist with total_sentence_count / batch_size elements. As with the original files,
the k-th element in the sublist is in batch k.
"""
with open(self.sentences_json_file) as fin:
sentences = json.load(fin)
# the expected embeddings
expected_lm_embeddings = []
for k in range(len(sentences)):
embed_fname = os.path.join(self.elmo_fixtures_path, "lm_embeddings_{}.hdf5".format(k))
expected_lm_embeddings.append([])
with h5py.File(embed_fname, "r") as fin:
for i in range(10):
sent_embeds = fin["%s" % i][...]
sent_embeds_concat = numpy.concatenate(
(sent_embeds[0, :, :], sent_embeds[1, :, :]), axis=-1
)
expected_lm_embeddings[-1].append(sent_embeds_concat)
return sentences, expected_lm_embeddings
@staticmethod
def get_vocab_and_both_elmo_indexed_ids(batch: List[List[str]]):
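        # Index the same batch twice: once with ELMo character ids and once with single
        # word ids. The word-id tensors are what the word-embedding cache tests below
        # feed in alongside the character ids.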
instances = []
indexer = ELMoTokenCharactersIndexer()
indexer2 = SingleIdTokenIndexer()
for sentence in batch:
tokens = [Token(token) for token in sentence]
field = TextField(tokens, {"character_ids": indexer, "tokens": indexer2})
instance = Instance({"elmo": field})
instances.append(instance)
dataset = Batch(instances)
vocab = Vocabulary.from_instances(instances)
dataset.index_instances(vocab)
return vocab, dataset.as_tensor_dict()["elmo"]
class TestElmoBiLm(ElmoTestCase):
def test_elmo_bilm(self):
# get the raw data
sentences, expected_lm_embeddings = self._load_sentences_embeddings()
# load the test model
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)
# Deal with the data.
indexer = ELMoTokenCharactersIndexer()
# For each sentence, first create a TextField, then create an instance
instances = []
for batch in zip(*sentences):
for sentence in batch:
tokens = [Token(token) for token in sentence.split()]
field = TextField(tokens, {"character_ids": indexer})
instance = Instance({"elmo": field})
instances.append(instance)
vocab = Vocabulary()
dataset = AllennlpDataset(instances, vocab)
# Now finally we can iterate through batches.
loader = PyTorchDataLoader(dataset, 3)
for i, batch in enumerate(loader):
lm_embeddings = elmo_bilm(batch["elmo"]["character_ids"]["elmo_tokens"])
top_layer_embeddings, mask = remove_sentence_boundaries(
lm_embeddings["activations"][2], lm_embeddings["mask"]
)
# check the mask lengths
lengths = mask.data.numpy().sum(axis=1)
batch_sentences = [sentences[k][i] for k in range(3)]
expected_lengths = [len(sentence.split()) for sentence in batch_sentences]
assert lengths.tolist() == expected_lengths
# get the expected embeddings and compare!
expected_top_layer = [expected_lm_embeddings[k][i] for k in range(3)]
for k in range(3):
assert numpy.allclose(
top_layer_embeddings[k, : lengths[k], :].data.numpy(),
expected_top_layer[k],
atol=1.0e-6,
)
def test_elmo_char_cnn_cache_does_not_raise_error_for_uncached_words(self):
sentences = [["This", "is", "OOV"], ["so", "is", "this"]]
in_vocab_sentences = [["here", "is"], ["a", "vocab"]]
oov_tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)[1]
vocab, in_vocab_tensor = self.get_vocab_and_both_elmo_indexed_ids(in_vocab_sentences)
words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file, vocab_to_cache=words_to_cache)
elmo_bilm(
in_vocab_tensor["character_ids"]["elmo_tokens"], in_vocab_tensor["tokens"]["tokens"]
)
elmo_bilm(oov_tensor["character_ids"]["elmo_tokens"], oov_tensor["tokens"]["tokens"])
def test_elmo_bilm_can_cache_char_cnn_embeddings(self):
sentences = [["This", "is", "a", "sentence"], ["Here", "'s", "one"], ["Another", "one"]]
vocab, tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)
words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)
elmo_bilm.eval()
no_cache = elmo_bilm(
tensor["character_ids"]["elmo_tokens"], tensor["character_ids"]["elmo_tokens"]
)
# ELMo is stateful, so we need to actually re-initialise it for this comparison to work.
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file, vocab_to_cache=words_to_cache)
elmo_bilm.eval()
cached = elmo_bilm(tensor["character_ids"]["elmo_tokens"], tensor["tokens"]["tokens"])
numpy.testing.assert_array_almost_equal(
no_cache["mask"].data.cpu().numpy(), cached["mask"].data.cpu().numpy()
)
for activation_cached, activation in zip(cached["activations"], no_cache["activations"]):
numpy.testing.assert_array_almost_equal(
activation_cached.data.cpu().numpy(), activation.data.cpu().numpy(), decimal=6
)
class TestElmo(ElmoTestCase):
def setup_method(self):
super().setup_method()
self.elmo = Elmo(self.options_file, self.weight_file, 2, dropout=0.0)
def _sentences_to_ids(self, sentences):
indexer = ELMoTokenCharactersIndexer()
# For each sentence, first create a TextField, then create an instance
instances = []
for sentence in sentences:
tokens = [Token(token) for token in sentence]
field = TextField(tokens, {"character_ids": indexer})
instance = Instance({"elmo": field})
instances.append(instance)
dataset = Batch(instances)
vocab = Vocabulary()
dataset.index_instances(vocab)
return dataset.as_tensor_dict()["elmo"]["character_ids"]["elmo_tokens"]
def test_elmo(self):
        # Correctness checks are in ElmoBiLm and ScalarMix; here we just add a shallow test
# to ensure things execute.
sentences = [
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
]
character_ids = self._sentences_to_ids(sentences)
output = self.elmo(character_ids)
elmo_representations = output["elmo_representations"]
mask = output["mask"]
assert len(elmo_representations) == 2
assert list(elmo_representations[0].size()) == [2, 7, 32]
assert list(elmo_representations[1].size()) == [2, 7, 32]
assert list(mask.size()) == [2, 7]
def test_elmo_keep_sentence_boundaries(self):
sentences = [
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
]
elmo = Elmo(
self.options_file, self.weight_file, 2, dropout=0.0, keep_sentence_boundaries=True
)
character_ids = self._sentences_to_ids(sentences)
output = elmo(character_ids)
elmo_representations = output["elmo_representations"]
mask = output["mask"]
assert len(elmo_representations) == 2
# Add 2 to the lengths because we're keeping the start and end of sentence tokens.
assert list(elmo_representations[0].size()) == [2, 7 + 2, 32]
assert list(elmo_representations[1].size()) == [2, 7 + 2, 32]
assert list(mask.size()) == [2, 7 + 2]
def test_elmo_4D_input(self):
sentences = [
[
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
],
[["1", "2"], ["1", "2", "3", "4", "5", "6", "7"]],
[["1", "2", "3", "4", "50", "60", "70"], ["The"]],
]
all_character_ids = []
for batch_sentences in sentences:
all_character_ids.append(self._sentences_to_ids(batch_sentences))
# (2, 3, 7, 50)
character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)
embeddings_4d = self.elmo(character_ids)
# Run the individual batches.
embeddings_3d = []
for char_ids in all_character_ids:
self.elmo._elmo_lstm._elmo_lstm.reset_states()
embeddings_3d.append(self.elmo(char_ids))
for k in range(3):
numpy.testing.assert_array_almost_equal(
embeddings_4d["elmo_representations"][0][:, k, :, :].data.numpy(),
embeddings_3d[k]["elmo_representations"][0].data.numpy(),
)
def test_elmo_with_module(self):
        # We will create the _ElmoBiLm class and pass it in as a module.
sentences = [
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
]
character_ids = self._sentences_to_ids(sentences)
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)
elmo = Elmo(None, None, 2, dropout=0.0, module=elmo_bilm)
output = elmo(character_ids)
elmo_representations = output["elmo_representations"]
assert len(elmo_representations) == 2
for k in range(2):
assert list(elmo_representations[k].size()) == [2, 7, 32]
def test_elmo_bilm_can_handle_higher_dimensional_input_with_cache(self):
sentences = [["This", "is", "a", "sentence"], ["Here", "'s", "one"], ["Another", "one"]]
vocab, tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)
words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
elmo_bilm = Elmo(self.options_file, self.weight_file, 1, vocab_to_cache=words_to_cache)
elmo_bilm.eval()
individual_dim = elmo_bilm(
tensor["character_ids"]["elmo_tokens"], tensor["tokens"]["tokens"]
)
elmo_bilm = Elmo(self.options_file, self.weight_file, 1, vocab_to_cache=words_to_cache)
elmo_bilm.eval()
expanded_word_ids = torch.stack([tensor["tokens"]["tokens"] for _ in range(4)], dim=1)
expanded_char_ids = torch.stack(
[tensor["character_ids"]["elmo_tokens"] for _ in range(4)], dim=1
)
expanded_result = elmo_bilm(expanded_char_ids, expanded_word_ids)
split_result = [
x.squeeze(1) for x in torch.split(expanded_result["elmo_representations"][0], 1, dim=1)
]
for expanded in split_result:
numpy.testing.assert_array_almost_equal(
expanded.data.cpu().numpy(),
individual_dim["elmo_representations"][0].data.cpu().numpy(),
)
class TestElmoRequiresGrad(ElmoTestCase):
def _run_test(self, requires_grad):
embedder = ElmoTokenEmbedder(
self.options_file, self.weight_file, requires_grad=requires_grad
)
batch_size = 3
seq_len = 4
char_ids = torch.from_numpy(numpy.random.randint(0, 262, (batch_size, seq_len, 50)))
embeddings = embedder(char_ids)
loss = embeddings.sum()
loss.backward()
elmo_grads = [
param.grad for name, param in embedder.named_parameters() if "_elmo_lstm" in name
]
if requires_grad:
# None of the elmo grads should be None.
assert all(grad is not None for grad in elmo_grads)
else:
# All of the elmo grads should be None.
assert all(grad is None for grad in elmo_grads)
def test_elmo_requires_grad(self):
self._run_test(True)
def test_elmo_does_not_require_grad(self):
self._run_test(False)
class TestElmoTokenRepresentation(ElmoTestCase):
def test_elmo_token_representation(self):
# Load the test words and convert to char ids
with open(os.path.join(self.elmo_fixtures_path, "vocab_test.txt"), "r") as fin:
words = fin.read().strip().split("\n")
vocab = Vocabulary()
indexer = ELMoTokenCharactersIndexer()
tokens = [Token(word) for word in words]
indices = indexer.tokens_to_indices(tokens, vocab)
        # There are 457 tokens. Reshape into 10 batches of 50 tokens each (the last,
        # partial batch gets padded out to 50).
sentences = []
for k in range(10):
char_indices = indices["elmo_tokens"][(k * 50) : ((k + 1) * 50)]
sentences.append(
indexer.as_padded_tensor_dict(
{"elmo_tokens": char_indices}, padding_lengths={"elmo_tokens": 50}
)["elmo_tokens"]
)
batch = torch.stack(sentences)
elmo_token_embedder = _ElmoCharacterEncoder(self.options_file, self.weight_file)
elmo_token_embedder_output = elmo_token_embedder(batch)
# Reshape back to a list of words and compare with ground truth. Need to also
# remove <S>, </S>
actual_embeddings = remove_sentence_boundaries(
elmo_token_embedder_output["token_embedding"], elmo_token_embedder_output["mask"]
)[0].data.numpy()
actual_embeddings = actual_embeddings.reshape(-1, actual_embeddings.shape[-1])
embedding_file = os.path.join(self.elmo_fixtures_path, "elmo_token_embeddings.hdf5")
with h5py.File(embedding_file, "r") as fin:
expected_embeddings = fin["embedding"][...]
assert numpy.allclose(actual_embeddings[: len(tokens)], expected_embeddings, atol=1e-6)
def test_elmo_token_representation_bos_eos(self):
# The additional <S> and </S> embeddings added by the embedder should be as expected.
indexer = ELMoTokenCharactersIndexer()
elmo_token_embedder = _ElmoCharacterEncoder(self.options_file, self.weight_file)
for correct_index, token in [[0, "<S>"], [2, "</S>"]]:
indices = indexer.tokens_to_indices([Token(token)], Vocabulary())
indices = torch.from_numpy(numpy.array(indices["elmo_tokens"])).view(1, 1, -1)
embeddings = elmo_token_embedder(indices)["token_embedding"]
assert numpy.allclose(
embeddings[0, correct_index, :].data.numpy(), embeddings[0, 1, :].data.numpy()
)
| allennlp-master | tests/modules/elmo_test.py |
import numpy
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.common.testing import AllenNlpTestCase
class TestStackedAlternatingLstm(AllenNlpTestCase):
def test_stacked_alternating_lstm_completes_forward_pass(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
input_tensor = pack_padded_sequence(input_tensor, [5, 4, 2, 1], batch_first=True)
lstm = StackedAlternatingLstm(3, 7, 3)
output, _ = lstm(input_tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
def test_lstms_are_interleaved(self):
lstm = StackedAlternatingLstm(3, 7, 8)
for i, layer in enumerate(lstm.lstm_layers):
if i % 2 == 0:
assert layer.go_forward
else:
assert not layer.go_forward
| allennlp-master | tests/modules/stacked_alternating_lstm_test.py |
import pytest
import numpy
import torch
import torch.nn.init
from torch.nn.modules.rnn import LSTM
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.augmented_lstm import AugmentedLstm, AugmentedLSTMCell, BiAugmentedLstm
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.util import sort_batch_by_length
class TestAugmentedLSTM(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
tensor = torch.rand([5, 7, 10])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, 6:, :] = 0
sequence_lengths = torch.LongTensor([3, 4, 2, 6, 7])
self.random_tensor = tensor
self.sequence_lengths = sequence_lengths
def test_variable_length_sequences_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
tensor = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
lstm = AugmentedLstm(10, 11)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_variable_length_sequences_run_backward_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
tensor = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
lstm = AugmentedLstm(10, 11, go_forward=False)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
augmented_lstm = AugmentedLstm(10, 11)
pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
# Initialize all weights to be == 1.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(augmented_lstm)
initializer(pytorch_lstm)
initial_state = torch.zeros([1, 5, 11])
initial_memory = torch.zeros([1, 5, 11])
# Use bigger numbers to avoid floating point instability.
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor * 5.0, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
augmented_output, augmented_state = augmented_lstm(
lstm_input, (initial_state, initial_memory)
)
pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)
numpy.testing.assert_array_almost_equal(
pytorch_output_sequence.data.numpy(), augmented_output_sequence.data.numpy(), decimal=4
)
numpy.testing.assert_array_almost_equal(
pytorch_state[0].data.numpy(), augmented_state[0].data.numpy(), decimal=4
)
numpy.testing.assert_array_almost_equal(
pytorch_state[1].data.numpy(), augmented_state[1].data.numpy(), decimal=4
)
def test_augmented_lstm_works_with_highway_connections(self):
augmented_lstm = AugmentedLstm(10, 11, use_highway=True)
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
augmented_lstm(lstm_input)
def test_augmented_lstm_throws_error_on_non_packed_sequence_input(self):
lstm = AugmentedLstm(3, 5)
tensor = torch.rand([5, 7, 9])
with pytest.raises(ConfigurationError):
lstm(tensor)
def test_augmented_lstm_is_initialized_with_correct_biases(self):
lstm = AugmentedLSTMCell(2, 3)
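        # The state bias is laid out gate-by-gate in blocks of `hidden_size`. Only the
        # second block (the forget gate in this cell) is expected to be initialised to 1,
        # the usual trick for encouraging gradient flow early in training.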
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
# Non-highway case.
lstm = AugmentedLSTMCell(2, 3, use_highway=False)
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
def test_dropout_is_not_applied_to_output_or_returned_hidden_states(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
tensor = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
lstm = AugmentedLstm(10, 11, recurrent_dropout_probability=0.5)
output, (hidden_state, _) = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
# Test returned output sequence
num_hidden_dims_zero_across_timesteps = ((output_sequence.sum(1) == 0).sum()).item()
# If this is not True then dropout has been applied to the output of the LSTM
assert not num_hidden_dims_zero_across_timesteps
        # Dropout should not be applied to the last hidden state, as it is not used
        # within the LSTM. This keeps the behaviour consistent with `torch.nn.LSTM`,
        # where dropout is not applied to any of its outputs, and with the Keras LSTM
        # implementation.
hidden_state = hidden_state.squeeze()
num_hidden_dims_zero_across_timesteps = ((hidden_state == 0).sum()).item()
assert not num_hidden_dims_zero_across_timesteps
def test_dropout_version_is_different_to_no_dropout(self):
augmented_lstm = AugmentedLstm(10, 11)
dropped_augmented_lstm = AugmentedLstm(10, 11, recurrent_dropout_probability=0.9)
# Initialize all weights to be == 1.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 0.5}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(augmented_lstm)
initializer(dropped_augmented_lstm)
initial_state = torch.randn([1, 5, 11])
initial_memory = torch.randn([1, 5, 11])
        # If we used inputs as large as in the PyTorch comparison test above, the
        # dropout would have no visible effect.
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
augmented_output, augmented_state = augmented_lstm(
lstm_input, (initial_state, initial_memory)
)
dropped_output, dropped_state = dropped_augmented_lstm(
lstm_input, (initial_state, initial_memory)
)
dropped_output_sequence, _ = pad_packed_sequence(dropped_output, batch_first=True)
augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_output_sequence.data.numpy(),
augmented_output_sequence.data.numpy(),
decimal=4,
)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[0].data.numpy(), augmented_state[0].data.numpy(), decimal=4
)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[1].data.numpy(), augmented_state[1].data.numpy(), decimal=4
)
def test_biaugmented_lstm(self):
for bidirectional in [True, False]:
bi_augmented_lstm = BiAugmentedLstm(
10, 11, 3, recurrent_dropout_probability=0.1, bidirectional=bidirectional
)
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
bi_augmented_lstm(lstm_input)
| allennlp-master | tests/modules/augmented_lstm_test.py |
import torch
from allennlp.common import Params
from allennlp.modules import BiMpmMatching
from allennlp.common.testing import AllenNlpTestCase
class TestBiMPMMatching(AllenNlpTestCase):
def test_forward(self):
batch = 16
len1, len2 = 21, 24
seq_len1 = torch.randint(low=len1 - 10, high=len1 + 1, size=(batch,)).long()
seq_len2 = torch.randint(low=len2 - 10, high=len2 + 1, size=(batch,)).long()
mask1 = []
for w in seq_len1:
mask1.append([1] * w.item() + [0] * (len1 - w.item()))
mask1 = torch.tensor(mask1, dtype=torch.bool)
mask2 = []
for w in seq_len2:
mask2.append([1] * w.item() + [0] * (len2 - w.item()))
mask2 = torch.tensor(mask2, dtype=torch.bool)
d = 200 # hidden dimension
n = 20 # number of perspective
test1 = torch.randn(batch, len1, d)
test2 = torch.randn(batch, len2, d)
test1 = test1 * mask1.view(-1, len1, 1).expand(-1, len1, d)
test2 = test2 * mask2.view(-1, len2, 1).expand(-1, len2, d)
test1_fw, test1_bw = torch.split(test1, d // 2, dim=-1)
test2_fw, test2_bw = torch.split(test2, d // 2, dim=-1)
ml_fw = BiMpmMatching.from_params(Params({"is_forward": True, "num_perspectives": n}))
ml_bw = BiMpmMatching.from_params(Params({"is_forward": False, "num_perspectives": n}))
vecs_p_fw, vecs_h_fw = ml_fw(test1_fw, mask1, test2_fw, mask2)
vecs_p_bw, vecs_h_bw = ml_bw(test1_bw, mask1, test2_bw, mask2)
vecs_p, vecs_h = (
torch.cat(vecs_p_fw + vecs_p_bw, dim=2),
torch.cat(vecs_h_fw + vecs_h_bw, dim=2),
)
assert vecs_p.size() == torch.Size([batch, len1, 10 + 10 * n])
assert vecs_h.size() == torch.Size([batch, len2, 10 + 10 * n])
assert (
ml_fw.get_output_dim()
== ml_bw.get_output_dim()
== vecs_p.size(2) // 2
== vecs_h.size(2) // 2
)
| allennlp-master | tests/modules/bimpm_matching_test.py |
from numpy.testing import assert_almost_equal
import inspect
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import FeedForward
from allennlp.nn import InitializerApplicator, Initializer, Activation
from allennlp.common.testing import AllenNlpTestCase
class TestFeedForward(AllenNlpTestCase):
def test_can_construct_from_params(self):
params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
feedforward = FeedForward.from_params(params)
assert len(feedforward._activations) == 2
assert [isinstance(a, torch.nn.ReLU) for a in feedforward._activations]
assert len(feedforward._linear_layers) == 2
assert [layer.weight.size(-1) == 3 for layer in feedforward._linear_layers]
params = Params(
{
"input_dim": 2,
"hidden_dims": [3, 4, 5],
"activations": ["relu", "relu", "linear"],
"dropout": 0.2,
"num_layers": 3,
}
)
feedforward = FeedForward.from_params(params)
assert len(feedforward._activations) == 3
assert isinstance(feedforward._activations[0], torch.nn.ReLU)
assert isinstance(feedforward._activations[1], torch.nn.ReLU)
# It's hard to check that the last activation is the lambda function we use for `linear`,
# so this is good enough.
assert not isinstance(feedforward._activations[2], torch.nn.ReLU)
assert len(feedforward._linear_layers) == 3
assert feedforward._linear_layers[0].weight.size(0) == 3
assert feedforward._linear_layers[1].weight.size(0) == 4
assert feedforward._linear_layers[2].weight.size(0) == 5
assert len(feedforward._dropout) == 3
assert [d.p == 0.2 for d in feedforward._dropout]
def test_init_checks_hidden_dim_consistency(self):
with pytest.raises(ConfigurationError):
FeedForward(2, 4, [5, 5], Activation.by_name("relu")())
def test_init_checks_activation_consistency(self):
with pytest.raises(ConfigurationError):
FeedForward(2, 4, 5, [Activation.by_name("relu")(), Activation.by_name("relu")()])
def test_forward_gives_correct_output(self):
params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
feedforward = FeedForward.from_params(params)
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(feedforward)
input_tensor = torch.FloatTensor([[-3, 1]])
output = feedforward(input_tensor).data.numpy()
assert output.shape == (1, 3)
# This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
# which then gets a bias added in the second layer to be [1, 1, 1].
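        # Concretely: the first Linear(2 -> 3) with unit weights and biases maps [-3, 1]
        # to (-3 + 1) + 1 = -1 per unit, ReLU clamps that to 0, and the second layer
        # then produces (0 * 3) + 1 = 1 per unit, which the final ReLU leaves unchanged.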
assert_almost_equal(output, [[1, 1, 1]])
def test_textual_representation_contains_activations(self):
params = Params(
{
"input_dim": 2,
"hidden_dims": 3,
"activations": ["linear", "relu", "swish"],
"num_layers": 3,
}
)
feedforward = FeedForward.from_params(params)
expected_text_representation = inspect.cleandoc(
"""
FeedForward(
(_activations): ModuleList(
(0): Linear()
(1): ReLU()
(2): Swish()
)
(_linear_layers): ModuleList(
(0): Linear(in_features=2, out_features=3, bias=True)
(1): Linear(in_features=3, out_features=3, bias=True)
(2): Linear(in_features=3, out_features=3, bias=True)
)
(_dropout): ModuleList(
(0): Dropout(p=0.0, inplace=False)
(1): Dropout(p=0.0, inplace=False)
(2): Dropout(p=0.0, inplace=False)
)
)
"""
)
actual_text_representation = str(feedforward)
assert actual_text_representation == expected_text_representation
| allennlp-master | tests/modules/feedforward_test.py |
import torch
import pytest
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.modules import ScalarMix
from allennlp.nn import util
class TestScalarMix(AllenNlpTestCase):
def test_scalar_mix_can_run_forward(self):
mixture = ScalarMix(3)
tensors = [torch.randn([3, 4, 5]) for _ in range(3)]
for k in range(3):
mixture.scalar_parameters[k].data[0] = 0.1 * (k + 1)
mixture.gamma.data[0] = 0.5
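        # ScalarMix computes gamma * sum_k softmax(scalars)_k * tensor_k, so with
        # scalars [0.1, 0.2, 0.3] and gamma = 0.5 the expected mixture can be
        # reproduced by hand below.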
result = mixture(tensors)
weights = [0.1, 0.2, 0.3]
normed_weights = numpy.exp(weights) / numpy.sum(numpy.exp(weights))
expected_result = sum(normed_weights[k] * tensors[k].data.numpy() for k in range(3))
expected_result *= 0.5
numpy.testing.assert_almost_equal(expected_result, result.data.numpy())
def test_scalar_mix_throws_error_on_incorrect_number_of_inputs(self):
mixture = ScalarMix(3)
tensors = [torch.randn([3, 4, 5]) for _ in range(5)]
with pytest.raises(ConfigurationError):
_ = mixture(tensors)
def test_scalar_mix_throws_error_on_incorrect_initial_scalar_parameters_length(self):
with pytest.raises(ConfigurationError):
ScalarMix(3, initial_scalar_parameters=[0.0, 0.0])
def test_scalar_mix_trainable_with_initial_scalar_parameters(self):
initial_scalar_parameters = [1.0, 2.0, 3.0]
mixture = ScalarMix(3, initial_scalar_parameters=initial_scalar_parameters, trainable=False)
for i, scalar_mix_parameter in enumerate(mixture.scalar_parameters):
assert scalar_mix_parameter.requires_grad is False
assert scalar_mix_parameter.item() == initial_scalar_parameters[i]
def test_scalar_mix_layer_norm(self):
mixture = ScalarMix(3, do_layer_norm="scalar_norm_reg")
tensors = [torch.randn([3, 4, 5]) for _ in range(3)]
numpy_mask = numpy.ones((3, 4), dtype="int32")
numpy_mask[1, 2:] = 0
mask = torch.from_numpy(numpy_mask).bool()
weights = [0.1, 0.2, 0.3]
for k in range(3):
mixture.scalar_parameters[k].data[0] = weights[k]
mixture.gamma.data[0] = 0.5
result = mixture(tensors, mask)
normed_weights = numpy.exp(weights) / numpy.sum(numpy.exp(weights))
expected_result = numpy.zeros((3, 4, 5))
for k in range(3):
mean = numpy.mean(tensors[k].data.numpy()[numpy_mask == 1])
std = numpy.std(tensors[k].data.numpy()[numpy_mask == 1])
normed_tensor = (tensors[k].data.numpy() - mean) / (
std + util.tiny_value_of_dtype(torch.float)
)
expected_result += normed_tensor * normed_weights[k]
expected_result *= 0.5
numpy.testing.assert_almost_equal(expected_result, result.data.numpy(), decimal=6)
| allennlp-master | tests/modules/scalar_mix_test.py |
import numpy
import pytest
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.common.params import Params
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.util import sort_batch_by_length
class TestStackedBidirectionalLstm:
def test_stacked_bidirectional_lstm_completes_forward_pass(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
input_tensor = pack_padded_sequence(input_tensor, [5, 4, 2, 1], batch_first=True)
lstm = StackedBidirectionalLstm(3, 7, 3)
output, _ = lstm(input_tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
def test_stacked_bidirectional_lstm_can_build_from_params(self):
params = Params(
{
"type": "stacked_bidirectional_lstm",
"input_size": 5,
"hidden_size": 9,
"num_layers": 3,
}
)
encoder = Seq2SeqEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 18
assert encoder.is_bidirectional
def test_stacked_bidirectional_lstm_can_build_from_params_seq2vec(self):
params = Params(
{
"type": "stacked_bidirectional_lstm",
"input_size": 5,
"hidden_size": 9,
"num_layers": 3,
}
)
encoder = Seq2VecEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 18
def test_stacked_bidirectional_lstm_can_complete_forward_pass_seq2vec(self):
params = Params(
{
"type": "stacked_bidirectional_lstm",
"input_size": 3,
"hidden_size": 9,
"num_layers": 3,
}
)
encoder = Seq2VecEncoder.from_params(params)
input_tensor = torch.rand(4, 5, 3)
mask = torch.ones(4, 5).bool()
output = encoder(input_tensor, mask)
assert output.detach().numpy().shape == (4, 18)
@pytest.mark.parametrize(
"dropout_name", ("layer_dropout_probability", "recurrent_dropout_probability")
)
def test_stacked_bidirectional_lstm_dropout_version_is_different(self, dropout_name: str):
stacked_lstm = StackedBidirectionalLstm(input_size=10, hidden_size=11, num_layers=3)
if dropout_name == "layer_dropout_probability":
dropped_stacked_lstm = StackedBidirectionalLstm(
input_size=10, hidden_size=11, num_layers=3, layer_dropout_probability=0.9
)
elif dropout_name == "recurrent_dropout_probability":
dropped_stacked_lstm = StackedBidirectionalLstm(
input_size=10, hidden_size=11, num_layers=3, recurrent_dropout_probability=0.9
)
else:
            raise ValueError(f"Did not recognise the dropout name {dropout_name}")
# Initialize all weights to be == 1.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 0.5}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(stacked_lstm)
initializer(dropped_stacked_lstm)
initial_state = torch.randn([3, 5, 11])
initial_memory = torch.randn([3, 5, 11])
tensor = torch.rand([5, 7, 10])
sequence_lengths = torch.LongTensor([7, 7, 7, 7, 7])
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(tensor, sequence_lengths)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
stacked_output, stacked_state = stacked_lstm(lstm_input, (initial_state, initial_memory))
dropped_output, dropped_state = dropped_stacked_lstm(
lstm_input, (initial_state, initial_memory)
)
dropped_output_sequence, _ = pad_packed_sequence(dropped_output, batch_first=True)
stacked_output_sequence, _ = pad_packed_sequence(stacked_output, batch_first=True)
if dropout_name == "layer_dropout_probability":
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_output_sequence.data.numpy(),
stacked_output_sequence.data.numpy(),
decimal=4,
)
if dropout_name == "recurrent_dropout_probability":
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[0].data.numpy(), stacked_state[0].data.numpy(), decimal=4
)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[1].data.numpy(), stacked_state[1].data.numpy(), decimal=4
)
| allennlp-master | tests/modules/stacked_bidirectional_lstm_test.py |
import numpy as np
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.masked_layer_norm import MaskedLayerNorm
from allennlp.nn import util
class TestMaskedLayerNorm(AllenNlpTestCase):
def test_masked_layer_norm(self):
x_n = np.random.rand(2, 3, 7)
mask_n = np.array([[1, 1, 0], [1, 1, 1]])
x = torch.from_numpy(x_n).float()
mask = torch.from_numpy(mask_n).bool()
layer_norm = MaskedLayerNorm(7, gamma0=0.2)
normed_x = layer_norm(x, mask)
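        # The expected statistics are computed over every feature of every unmasked
        # position: the mask keeps 5 of the 6 positions and each position has 7
        # features, hence N = 7 * 5.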
N = 7 * 5
mean = (x_n * np.expand_dims(mask_n, axis=-1)).sum() / N
std = np.sqrt(
(((x_n - mean) * np.expand_dims(mask_n, axis=-1)) ** 2).sum() / N
+ util.tiny_value_of_dtype(torch.float)
)
expected = 0.2 * (x_n - mean) / (std + util.tiny_value_of_dtype(torch.float))
assert np.allclose(normed_x.data.numpy(), expected)
| allennlp-master | tests/modules/masked_layer_norm_test.py |
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import Seq2VecEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestSeq2VecEncoder(AllenNlpTestCase):
    def test_from_params_builds_encoder_correctly(self):
# We're just making sure parameters get passed through correctly here, and that the basic
# API works.
params = Params(
{
"type": "lstm",
"bidirectional": True,
"num_layers": 3,
"input_size": 5,
"hidden_size": 7,
}
)
encoder = Seq2VecEncoder.from_params(params)
assert encoder.__class__.__name__ == "LstmSeq2VecEncoder"
assert encoder._module.__class__.__name__ == "LSTM"
assert encoder._module.num_layers == 3
assert encoder._module.input_size == 5
assert encoder._module.hidden_size == 7
assert encoder._module.bidirectional is True
assert encoder._module.batch_first is True
def test_from_params_requires_batch_first(self):
params = Params({"type": "lstm", "batch_first": False})
with pytest.raises(ConfigurationError):
Seq2VecEncoder.from_params(params)
| allennlp-master | tests/modules/seq2vec_encoder_test.py |
import numpy
import torch
from allennlp.modules.elmo_lstm import ElmoLstm
from allennlp.common.testing import AllenNlpTestCase
class TestElmoLstmCell(AllenNlpTestCase):
def test_elmo_lstm(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
mask = torch.ones([4, 5]).bool()
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 1:] = False
lstm = ElmoLstm(
num_layers=2,
input_size=3,
hidden_size=5,
cell_size=7,
memory_cell_clip_value=2,
state_projection_clip_value=1,
)
output_sequence = lstm(input_tensor, mask)
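        # The output stacks the per-layer activations along a leading dimension, so it
        # has shape (num_layers, batch_size, timesteps, 2 * hidden_size); that is why
        # the indexing below starts with a full slice over layers.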
# Check all the layer outputs are masked properly.
numpy.testing.assert_array_equal(output_sequence.data[:, 1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[:, 2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[:, 3, 1:, :].numpy(), 0.0)
        # The LSTM hidden state should be (num_layers, batch_size, 2 * hidden_size);
        # the factor of 2 comes from the bidirectional LSTM.
        assert list(lstm._states[0].size()) == [2, 4, 10]
        # Similarly, the memory cell should be (num_layers, batch_size, 2 * cell_size).
        assert list(lstm._states[1].size()) == [2, 4, 14]
| allennlp-master | tests/modules/stacked_elmo_lstm_test.py |
from typing import Tuple
import torch
import numpy as np
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.sampled_softmax_loss import _choice, SampledSoftmaxLoss
from allennlp.modules import SoftmaxLoss
class TestSampledSoftmaxLoss(AllenNlpTestCase):
def test_choice(self):
sample, num_tries = _choice(num_words=1000, num_samples=50)
assert len(set(sample)) == 50
assert all(0 <= x < 1000 for x in sample)
assert num_tries >= 50
def test_sampled_softmax_can_run(self):
softmax = SampledSoftmaxLoss(num_words=1000, embedding_dim=12, num_samples=50)
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
_ = softmax(embedding, targets)
def test_sampled_equals_unsampled_during_eval(self):
sampled_softmax = SampledSoftmaxLoss(num_words=10000, embedding_dim=12, num_samples=40)
unsampled_softmax = SoftmaxLoss(num_words=10000, embedding_dim=12)
sampled_softmax.eval()
unsampled_softmax.eval()
# set weights equal, use transpose because opposite shapes
sampled_softmax.softmax_w.data = unsampled_softmax.softmax_w.t()
sampled_softmax.softmax_b.data = unsampled_softmax.softmax_b
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
full_loss = unsampled_softmax(embedding, targets).item()
sampled_loss = sampled_softmax(embedding, targets).item()
# Should be really close
np.testing.assert_almost_equal(sampled_loss, full_loss)
def test_sampled_softmax_has_greater_loss_in_train_mode(self):
sampled_softmax = SampledSoftmaxLoss(num_words=10000, embedding_dim=12, num_samples=10)
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
sampled_softmax.train()
train_loss = sampled_softmax(embedding, targets).item()
sampled_softmax.eval()
eval_loss = sampled_softmax(embedding, targets).item()
assert eval_loss > train_loss
def test_sampled_equals_unsampled_when_biased_against_non_sampled_positions(self):
sampled_softmax = SampledSoftmaxLoss(num_words=10000, embedding_dim=12, num_samples=10)
unsampled_softmax = SoftmaxLoss(num_words=10000, embedding_dim=12)
# fake out choice function
FAKE_SAMPLES = [100, 200, 300, 400, 500, 600, 700, 800, 900, 9999]
def fake_choice(num_words: int, num_samples: int) -> Tuple[np.ndarray, int]:
assert (num_words, num_samples) == (10000, 10)
return np.array(FAKE_SAMPLES), 12
sampled_softmax.choice_func = fake_choice
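        # With the sampler pinned to FAKE_SAMPLES, pushing the full softmax's bias for
        # every other word down to -10000 makes those words contribute essentially
        # nothing, so the sampled and unsampled losses should agree closely.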
# bias out the unsampled terms:
for i in range(10000):
if i not in FAKE_SAMPLES:
unsampled_softmax.softmax_b[i] = -10000
# set weights equal, use transpose because opposite shapes
sampled_softmax.softmax_w.data = unsampled_softmax.softmax_w.t()
sampled_softmax.softmax_b.data = unsampled_softmax.softmax_b
sampled_softmax.train()
unsampled_softmax.train()
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
full_loss = unsampled_softmax(embedding, targets).item()
sampled_loss = sampled_softmax(embedding, targets).item()
# Should be close
pct_error = (sampled_loss - full_loss) / full_loss
assert abs(pct_error) < 0.001
| allennlp-master | tests/modules/sampled_softmax_loss_test.py |
from numpy.testing import assert_almost_equal
from overrides import overrides
import torch
from torch.nn import Embedding, Module, Parameter
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import TimeDistributed
class TestTimeDistributed(AllenNlpTestCase):
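    # TimeDistributed folds the batch and time dimensions of each input into a single
    # dimension, applies the wrapped module, and then unfolds the result back to
    # (batch, time, ...); that reshaping behaviour is what these tests exercise.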
def test_time_distributed_reshapes_named_arg_correctly(self):
char_embedding = Embedding(2, 2)
char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
distributed_embedding = TimeDistributed(char_embedding)
char_input = torch.LongTensor([[[1, 0], [1, 1]]])
output = distributed_embedding(char_input)
assert_almost_equal(
output.data.numpy(), [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]]
)
def test_time_distributed_reshapes_positional_kwarg_correctly(self):
char_embedding = Embedding(2, 2)
char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
distributed_embedding = TimeDistributed(char_embedding)
char_input = torch.LongTensor([[[1, 0], [1, 1]]])
output = distributed_embedding(input=char_input)
assert_almost_equal(
output.data.numpy(), [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]]
)
def test_time_distributed_works_with_multiple_inputs(self):
module = lambda x, y: x + y
distributed = TimeDistributed(module)
x_input = torch.LongTensor([[[1, 2], [3, 4]]])
y_input = torch.LongTensor([[[4, 2], [9, 1]]])
output = distributed(x_input, y_input)
assert_almost_equal(output.data.numpy(), [[[5, 4], [12, 5]]])
def test_time_distributed_reshapes_multiple_inputs_with_pass_through_tensor_correctly(self):
class FakeModule(Module):
@overrides
def forward(self, input_tensor, tensor_to_pass_through=None, another_tensor=None):
return input_tensor + tensor_to_pass_through + another_tensor
module = FakeModule()
distributed_module = TimeDistributed(module)
input_tensor1 = torch.LongTensor([[[1, 2], [3, 4]]])
input_to_pass_through = torch.LongTensor([3, 7])
input_tensor2 = torch.LongTensor([[[4, 2], [9, 1]]])
output = distributed_module(
input_tensor1,
tensor_to_pass_through=input_to_pass_through,
another_tensor=input_tensor2,
pass_through=["tensor_to_pass_through"],
)
assert_almost_equal(output.data.numpy(), [[[8, 11], [15, 12]]])
def test_time_distributed_reshapes_multiple_inputs_with_pass_through_non_tensor_correctly(self):
class FakeModule(Module):
@overrides
def forward(self, input_tensor, number=0, another_tensor=None):
return input_tensor + number + another_tensor
module = FakeModule()
distributed_module = TimeDistributed(module)
input_tensor1 = torch.LongTensor([[[1, 2], [3, 4]]])
input_number = 5
input_tensor2 = torch.LongTensor([[[4, 2], [9, 1]]])
output = distributed_module(
input_tensor1,
number=input_number,
another_tensor=input_tensor2,
pass_through=["number"],
)
assert_almost_equal(output.data.numpy(), [[[10, 9], [17, 10]]])
| allennlp-master | tests/modules/time_distributed_test.py |
import numpy
import pytest
import torch
from torch.nn import LSTM, RNN
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common.testing import AllenNlpTestCase, requires_gpu
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
class TestEncoderBase(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.lstm = LSTM(
bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True
)
self.rnn = RNN(
bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True
)
self.encoder_base = _EncoderBase(stateful=True)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[3, 2:, :] = 0
self.tensor = tensor
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False # <= completely masked
mask[3, 2:] = False
mask[4, :] = False # <= completely masked
self.mask = mask
self.batch_size = 5
self.num_valid = 3
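        # Rows 2 and 4 of the mask are completely padded, so only 3 of the 5 rows in
        # the batch contain any real tokens ("valid" rows).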
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
_, _, restoration_indices, sorting_indices = sort_batch_by_length(tensor, sequence_lengths)
self.sorting_indices = sorting_indices
self.restoration_indices = restoration_indices
def test_non_stateful_states_are_sorted_correctly(self):
encoder_base = _EncoderBase(stateful=False)
initial_states = (torch.randn(6, 5, 7), torch.randn(6, 5, 7))
# Check that we sort the state for non-stateful encoders. To test
# we'll just use a "pass through" encoder, as we aren't actually testing
# the functionality of the encoder here anyway.
_, states, restoration_indices = encoder_base.sort_and_run_forward(
lambda *x: x, self.tensor, self.mask, initial_states
)
# Our input tensor had 2 zero length sequences, so we need
# to concat a tensor of shape
# (num_layers * num_directions, batch_size - num_valid, hidden_dim),
# to the output before unsorting it.
zeros = torch.zeros([6, 2, 7])
# sort_and_run_forward strips fully-padded instances from the batch;
# in order to use the restoration_indices we need to add back the two
# that got stripped. What we get back should match what we started with.
for state, original in zip(states, initial_states):
assert list(state.size()) == [6, 3, 7]
state_with_zeros = torch.cat([state, zeros], 1)
unsorted_state = state_with_zeros.index_select(1, restoration_indices)
for index in [0, 1, 3]:
numpy.testing.assert_array_equal(
unsorted_state[:, index, :].data.numpy(), original[:, index, :].data.numpy()
)
def test_get_initial_states(self):
# First time we call it, there should be no state, so we should return None.
assert (
self.encoder_base._get_initial_states(
self.batch_size, self.num_valid, self.sorting_indices
)
is None
)
# First test the case that the previous state is _smaller_ than the current state input.
initial_states = (torch.randn([1, 3, 7]), torch.randn([1, 3, 7]))
self.encoder_base._states = initial_states
# sorting indices are: [0, 1, 3, 2, 4]
returned_states = self.encoder_base._get_initial_states(
self.batch_size, self.num_valid, self.sorting_indices
)
correct_expanded_states = [
torch.cat([state, torch.zeros([1, 2, 7])], 1) for state in initial_states
]
# State should have been expanded with zeros to have shape (1, batch_size, hidden_size).
numpy.testing.assert_array_equal(
self.encoder_base._states[0].data.numpy(), correct_expanded_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1].data.numpy(), correct_expanded_states[1].data.numpy()
)
# The returned states should be of shape (1, num_valid, hidden_size) and
# they also should have been sorted with respect to the indices.
# sorting indices are: [0, 1, 3, 2, 4]
correct_returned_states = [
state.index_select(1, self.sorting_indices)[:, : self.num_valid, :]
for state in correct_expanded_states
]
numpy.testing.assert_array_equal(
returned_states[0].data.numpy(), correct_returned_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
returned_states[1].data.numpy(), correct_returned_states[1].data.numpy()
)
# Now test the case that the previous state is larger:
original_states = (torch.randn([1, 10, 7]), torch.randn([1, 10, 7]))
self.encoder_base._states = original_states
# sorting indices are: [0, 1, 3, 2, 4]
returned_states = self.encoder_base._get_initial_states(
self.batch_size, self.num_valid, self.sorting_indices
)
# State should not have changed, as they were larger
# than the batch size of the requested states.
numpy.testing.assert_array_equal(
self.encoder_base._states[0].data.numpy(), original_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1].data.numpy(), original_states[1].data.numpy()
)
# The returned states should be of shape (1, num_valid, hidden_size) and they
# also should have been sorted with respect to the indices.
correct_returned_state = [
x.index_select(1, self.sorting_indices)[:, : self.num_valid, :] for x in original_states
]
numpy.testing.assert_array_equal(
returned_states[0].data.numpy(), correct_returned_state[0].data.numpy()
)
numpy.testing.assert_array_equal(
returned_states[1].data.numpy(), correct_returned_state[1].data.numpy()
)
def test_update_states(self):
assert self.encoder_base._states is None
initial_states = torch.randn([1, 5, 7]), torch.randn([1, 5, 7])
index_selected_initial_states = (
initial_states[0].index_select(1, self.restoration_indices),
initial_states[1].index_select(1, self.restoration_indices),
)
self.encoder_base._update_states(initial_states, self.restoration_indices)
# State was None, so the updated state should just be the sorted given state.
numpy.testing.assert_array_equal(
self.encoder_base._states[0].data.numpy(), index_selected_initial_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1].data.numpy(), index_selected_initial_states[1].data.numpy()
)
new_states = torch.randn([1, 5, 7]), torch.randn([1, 5, 7])
# tensor has 2 completely masked rows, so the last 2 rows of the _sorted_ states
# will be completely zero, having been appended after calling the respective encoder.
new_states[0][:, -2:, :] = 0
new_states[1][:, -2:, :] = 0
index_selected_new_states = (
new_states[0].index_select(1, self.restoration_indices),
new_states[1].index_select(1, self.restoration_indices),
)
self.encoder_base._update_states(new_states, self.restoration_indices)
# Check that the update _preserved_ the state for the rows which were
# completely masked (2 and 4):
for index in [2, 4]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_initial_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_initial_states[1][:, index, :].data.numpy(),
)
# Now the states which were updated:
for index in [0, 1, 3]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_new_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_new_states[1][:, index, :].data.numpy(),
)
# Now test the case that the new state is smaller:
small_new_states = torch.randn([1, 3, 7]), torch.randn([1, 3, 7])
# pretend the 2nd sequence in the batch was fully masked.
small_restoration_indices = torch.LongTensor([2, 0, 1])
small_new_states[0][:, 0, :] = 0
small_new_states[1][:, 0, :] = 0
index_selected_small_states = (
small_new_states[0].index_select(1, small_restoration_indices),
small_new_states[1].index_select(1, small_restoration_indices),
)
self.encoder_base._update_states(small_new_states, small_restoration_indices)
        # Check that the rows we didn't update are the same as in the previous step:
for index in [1, 3]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_new_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_new_states[1][:, index, :].data.numpy(),
)
# Indices we did update:
for index in [0, 2]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_small_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_small_states[1][:, index, :].data.numpy(),
)
# We didn't update index 4 in the previous step either, so it should be equal to the
# 4th index of initial states.
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, 4, :].data.numpy(),
index_selected_initial_states[0][:, 4, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, 4, :].data.numpy(),
index_selected_initial_states[1][:, 4, :].data.numpy(),
)
def test_reset_states(self):
# Initialize the encoder states.
assert self.encoder_base._states is None
initial_states = torch.randn([1, 5, 7]), torch.randn([1, 5, 7])
index_selected_initial_states = (
initial_states[0].index_select(1, self.restoration_indices),
initial_states[1].index_select(1, self.restoration_indices),
)
self.encoder_base._update_states(initial_states, self.restoration_indices)
# Check that only some of the states are reset when a mask is provided.
mask = torch.tensor([True, True, False, False, False])
self.encoder_base.reset_states(mask)
# First two states should be zeros
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, :2, :].data.numpy(),
torch.zeros_like(initial_states[0])[:, :2, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, :2, :].data.numpy(),
torch.zeros_like(initial_states[1])[:, :2, :].data.numpy(),
)
# Remaining states should be the same
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, 2:, :].data.numpy(),
index_selected_initial_states[0][:, 2:, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, 2:, :].data.numpy(),
index_selected_initial_states[1][:, 2:, :].data.numpy(),
)
# Check that error is raised if mask has wrong batch size.
bad_mask = torch.tensor([True, True, False])
with pytest.raises(ValueError):
self.encoder_base.reset_states(bad_mask)
# Check that states are reset to None if no mask is provided.
self.encoder_base.reset_states()
assert self.encoder_base._states is None
def test_non_contiguous_initial_states_handled(self):
# Check that the encoder is robust to non-contiguous initial states.
# Case 1: Encoder is not stateful
        # A transposition will make the tensors non-contiguous, so we start them off at the
        # wrong shape and transpose them into the right shape.
encoder_base = _EncoderBase(stateful=False)
initial_states = (
torch.randn(5, 6, 7).permute(1, 0, 2),
torch.randn(5, 6, 7).permute(1, 0, 2),
)
assert not initial_states[0].is_contiguous() and not initial_states[1].is_contiguous()
assert initial_states[0].size() == torch.Size([6, 5, 7])
assert initial_states[1].size() == torch.Size([6, 5, 7])
# We'll pass them through an LSTM encoder and a vanilla RNN encoder to make sure it works
# whether the initial states are a tuple of tensors or just a single tensor.
encoder_base.sort_and_run_forward(self.lstm, self.tensor, self.mask, initial_states)
encoder_base.sort_and_run_forward(self.rnn, self.tensor, self.mask, initial_states[0])
# Case 2: Encoder is stateful
# For stateful encoders, the initial state may be non-contiguous if its state was
# previously updated with non-contiguous tensors. As in the non-stateful tests, we check
# that the encoder still works on initial states for RNNs and LSTMs.
final_states = initial_states
# Check LSTM
encoder_base = _EncoderBase(stateful=True)
encoder_base._update_states(final_states, self.restoration_indices)
encoder_base.sort_and_run_forward(self.lstm, self.tensor, self.mask)
# Check RNN
encoder_base.reset_states()
encoder_base._update_states([final_states[0]], self.restoration_indices)
encoder_base.sort_and_run_forward(self.rnn, self.tensor, self.mask)
@requires_gpu
def test_non_contiguous_initial_states_handled_on_gpu(self):
# Some PyTorch operations which produce contiguous tensors on the CPU produce
# non-contiguous tensors on the GPU (e.g. forward pass of an RNN when batch_first=True).
        # Accordingly, we perform the same checks from the previous test on the GPU to ensure the
# encoder is not affected by which device it is on.
# Case 1: Encoder is not stateful
        # A transposition will make the tensors non-contiguous, so we start them off at the
        # wrong shape and transpose them into the right shape.
encoder_base = _EncoderBase(stateful=False).cuda()
initial_states = (
torch.randn(5, 6, 7).cuda().permute(1, 0, 2),
torch.randn(5, 6, 7).cuda().permute(1, 0, 2),
)
assert not initial_states[0].is_contiguous() and not initial_states[1].is_contiguous()
assert initial_states[0].size() == torch.Size([6, 5, 7])
assert initial_states[1].size() == torch.Size([6, 5, 7])
# We'll pass them through an LSTM encoder and a vanilla RNN encoder to make sure it works
# whether the initial states are a tuple of tensors or just a single tensor.
encoder_base.sort_and_run_forward(
self.lstm.cuda(), self.tensor.cuda(), self.mask.cuda(), initial_states
)
encoder_base.sort_and_run_forward(
self.rnn.cuda(), self.tensor.cuda(), self.mask.cuda(), initial_states[0]
)
# Case 2: Encoder is stateful
# For stateful encoders, the initial state may be non-contiguous if its state was
# previously updated with non-contiguous tensors. As in the non-stateful tests, we check
# that the encoder still works on initial states for RNNs and LSTMs.
final_states = initial_states
# Check LSTM
encoder_base = _EncoderBase(stateful=True).cuda()
encoder_base._update_states(final_states, self.restoration_indices.cuda())
encoder_base.sort_and_run_forward(self.lstm.cuda(), self.tensor.cuda(), self.mask.cuda())
# Check RNN
encoder_base.reset_states()
encoder_base._update_states([final_states[0]], self.restoration_indices.cuda())
encoder_base.sort_and_run_forward(self.rnn.cuda(), self.tensor.cuda(), self.mask.cuda())
| allennlp-master | tests/modules/encoder_base_test.py |
import numpy
import torch
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection
from allennlp.common.testing import AllenNlpTestCase
class TestLstmCellWithProjection(AllenNlpTestCase):
def test_elmo_lstm_cell_completes_forward_pass(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
initial_hidden_state = torch.ones([1, 4, 5])
initial_memory_state = torch.ones([1, 4, 7])
lstm = LstmCellWithProjection(
input_size=3,
hidden_size=5,
cell_size=7,
memory_cell_clip_value=2,
state_projection_clip_value=1,
)
output_sequence, lstm_state = lstm(
input_tensor, [5, 4, 2, 1], (initial_hidden_state, initial_memory_state)
)
numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
# Test the state clipping.
numpy.testing.assert_array_less(output_sequence.data.numpy(), 1.0)
numpy.testing.assert_array_less(-output_sequence.data.numpy(), 1.0)
# LSTM state should be (num_layers, batch_size, hidden_size)
assert list(lstm_state[0].size()) == [1, 4, 5]
# LSTM memory cell should be (num_layers, batch_size, cell_size)
        assert list(lstm_state[1].size()) == [1, 4, 7]
        # Test the memory cell clipping (the memory cell is the second state tensor).
        numpy.testing.assert_array_less(lstm_state[1].data.numpy(), 2.0)
        numpy.testing.assert_array_less(-lstm_state[1].data.numpy(), 2.0)
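# Hedged shape sketch (an editorial addition, not part of the original test).
# The point of the projection: hidden_size (the projected output, 5 here) and
# cell_size (the internal memory cell, 7 here) are deliberately different, so the
# two returned state tensors have different widths. Default clipping/dropout
# arguments are assumed below.
if __name__ == "__main__":
    cell = LstmCellWithProjection(input_size=3, hidden_size=5, cell_size=7)
    inputs = torch.rand(2, 4, 3)
    initial_state = (torch.zeros(1, 2, 5), torch.zeros(1, 2, 7))
    outputs, (hidden, memory) = cell(inputs, [4, 3], initial_state)
    assert outputs.shape == (2, 4, 5)  # (batch, timesteps, hidden_size)
    assert hidden.shape == (1, 2, 5)  # projected hidden state
    assert memory.shape == (1, 2, 7)  # un-projected memory cell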
| allennlp-master | tests/modules/lstm_cell_with_projection_test.py |
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import Seq2SeqEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestSeq2SeqEncoder(AllenNlpTestCase):
    def test_from_params_builds_encoder_correctly(self):
# We're just making sure parameters get passed through correctly here, and that the basic
# API works.
params = Params(
{
"type": "lstm",
"bidirectional": True,
"num_layers": 3,
"input_size": 5,
"hidden_size": 7,
"stateful": True,
}
)
encoder = Seq2SeqEncoder.from_params(params)
assert encoder.__class__.__name__ == "LstmSeq2SeqEncoder"
assert encoder._module.__class__.__name__ == "LSTM"
assert encoder._module.num_layers == 3
assert encoder._module.input_size == 5
assert encoder._module.hidden_size == 7
assert encoder._module.bidirectional is True
assert encoder._module.batch_first is True
assert encoder.stateful is True
def test_from_params_requires_batch_first(self):
params = Params({"type": "lstm", "batch_first": False})
with pytest.raises(ConfigurationError):
Seq2SeqEncoder.from_params(params)
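# Hedged forward-pass sketch (an editorial addition, not part of the original test).
# The test above only checks that constructor arguments are plumbed through. As a
# sketch of the runtime contract (assuming the usual Seq2SeqEncoder API): input of
# shape (batch, seq_len, input_size) plus a boolean mask comes back as
# (batch, seq_len, get_output_dim()), where the bidirectional LSTM doubles the
# hidden size (2 * 7 = 14 here).
if __name__ == "__main__":
    import torch

    sketch_encoder = Seq2SeqEncoder.from_params(
        Params(
            {
                "type": "lstm",
                "bidirectional": True,
                "num_layers": 1,
                "input_size": 5,
                "hidden_size": 7,
            }
        )
    )
    sketch_output = sketch_encoder(torch.randn(2, 9, 5), torch.ones(2, 9, dtype=torch.bool))
    assert sketch_output.shape == (2, 9, sketch_encoder.get_output_dim())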
| allennlp-master | tests/modules/seq2seq_encoder_test.py |
from numpy.testing import assert_almost_equal
import torch
from allennlp.modules import ResidualWithLayerDropout
from allennlp.common.testing import AllenNlpTestCase
class TestResidualWithLayerDropout(AllenNlpTestCase):
def test_dropout_works_for_training(self):
layer_input_tensor = torch.FloatTensor([[2, 1], [-3, -2]])
layer_output_tensor = torch.FloatTensor([[1, 3], [2, -1]])
# The layer output should be dropped
residual_with_layer_dropout = ResidualWithLayerDropout(1)
residual_with_layer_dropout.train()
result = residual_with_layer_dropout(layer_input_tensor, layer_output_tensor).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2, 1], [-3, -2]])
result = residual_with_layer_dropout(
layer_input_tensor, layer_output_tensor, 1, 1
).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2, 1], [-3, -2]])
# The layer output should not be dropped
residual_with_layer_dropout = ResidualWithLayerDropout(0.0)
residual_with_layer_dropout.train()
result = residual_with_layer_dropout(layer_input_tensor, layer_output_tensor).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2 + 1, 1 + 3], [-3 + 2, -2 - 1]])
def test_dropout_works_for_testing(self):
layer_input_tensor = torch.FloatTensor([[2, 1], [-3, -2]])
layer_output_tensor = torch.FloatTensor([[1, 3], [2, -1]])
# During testing, the layer output is re-calibrated according to the survival probability,
# and then added to the input.
residual_with_layer_dropout = ResidualWithLayerDropout(0.2)
residual_with_layer_dropout.eval()
result = residual_with_layer_dropout(layer_input_tensor, layer_output_tensor).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2 + 1 * 0.8, 1 + 3 * 0.8], [-3 + 2 * 0.8, -2 - 1 * 0.8]])
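# Hedged note on the eval-time rescaling (an editorial addition).
# The expectations above encode the usual stochastic-depth convention: in training
# the layer output is either dropped entirely (output = input) or added unscaled,
# while in eval mode it is always added but scaled by the survival probability,
# i.e. output = input + (1 - dropout_probability) * layer_output.
if __name__ == "__main__":
    x = torch.FloatTensor([[2.0, 1.0]])
    fx = torch.FloatTensor([[1.0, 3.0]])
    residual = ResidualWithLayerDropout(0.2)
    residual.eval()
    assert torch.allclose(residual(x, fx), x + (1 - 0.2) * fx)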
| allennlp-master | tests/modules/residual_with_layer_dropout_test.py |
import pytest
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import GatedSum
class TestGatedSum(AllenNlpTestCase):
def test_gated_sum_can_run_forward(self):
a = torch.FloatTensor([1, 2, 3, 4, 5])
b = -a + 0.1
weight_value = 2
gate_value = torch.sigmoid(torch.FloatTensor([1]))
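        # (Editorial note) The 1 inside the sigmoid follows from the fixture: the gate
        # is assumed to read the elementwise sum a + b (all entries 0.1), and with every
        # projection weight set to 2 and a zero bias, 2 * sum(a + b) = 2 * 0.5 = 1.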
expected = gate_value * a + (1 - gate_value) * b
with torch.no_grad(): # because we want to change the weight
gated_sum = GatedSum(a.size(-1))
gated_sum._gate.weight *= 0
gated_sum._gate.weight += weight_value
gated_sum._gate.bias *= 0
out = gated_sum(a, b)
numpy.testing.assert_almost_equal(expected.data.numpy(), out.data.numpy(), decimal=5)
with pytest.raises(ValueError):
GatedSum(a.size(-1))(a, b.unsqueeze(0))
with pytest.raises(ValueError):
GatedSum(100)(a, b)
def test_input_output_dim(self):
dim = 77
gated_sum = GatedSum(dim)
numpy.testing.assert_equal(gated_sum.get_input_dim(), dim)
numpy.testing.assert_equal(gated_sum.get_output_dim(), dim)
| allennlp-master | tests/modules/gated_sum_test.py |
from numpy.testing import assert_almost_equal
import torch
from allennlp.modules import Highway
from allennlp.common.testing import AllenNlpTestCase
class TestHighway(AllenNlpTestCase):
def test_forward_works_on_simple_input(self):
highway = Highway(2, 2)
highway._layers[0].weight.data.fill_(1)
highway._layers[0].bias.data.fill_(0)
highway._layers[1].weight.data.fill_(2)
highway._layers[1].bias.data.fill_(-2)
input_tensor = torch.FloatTensor([[-2, 1], [3, -2]])
result = highway(input_tensor).data.numpy()
assert result.shape == (2, 2)
# This was checked by hand.
assert_almost_equal(result, [[-0.0394, 0.0197], [1.7527, -0.5550]], decimal=4)
def test_forward_works_on_nd_input(self):
highway = Highway(2, 2)
input_tensor = torch.ones(2, 2, 2)
output = highway(input_tensor)
assert output.size() == (2, 2, 2)
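# Hedged derivation of the hand-checked values (an editorial addition).
# The fixture numbers follow from the gating convention they imply: each layer
# projects x to 2 * dim, the first half goes through ReLU, and the sigmoid of the
# second half gates how much of the *input* is carried through:
#     y = sigmoid(g) * x + (1 - sigmoid(g)) * relu(h),  where [h, g] = W x + b.
# Because every weight in a layer is filled with the same constant, each unit of
# the projection sees the same value, which keeps the hand computation short.
if __name__ == "__main__":
    x = torch.FloatTensor([[-2, 1], [3, -2]])
    for weight, bias in [(1.0, 0.0), (2.0, -2.0)]:
        h = weight * x.sum(dim=-1, keepdim=True) + bias
        gate = torch.sigmoid(h)
        x = gate * x + (1 - gate) * torch.relu(h)
    assert torch.allclose(
        x, torch.FloatTensor([[-0.0394, 0.0197], [1.7527, -0.5550]]), atol=1e-3
    )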
| allennlp-master | tests/modules/highway_test.py |
import itertools
import math
from pytest import approx, raises
import torch
from numpy.testing import assert_allclose
from allennlp.modules import ConditionalRandomField
from allennlp.modules.conditional_random_field import allowed_transitions
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
class TestConditionalRandomField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.logits = torch.Tensor(
[
[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3, 0.1], [0, 0, 0.9, 10, 1]],
[[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, 0.3, 0.1], [0, 0, 0.9, 1, 1]],
]
)
self.tags = torch.LongTensor([[2, 3, 4], [3, 2, 2]])
self.transitions = torch.Tensor(
[
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.8, 0.3, 0.1, 0.7, 0.9],
[-0.3, 2.1, -5.6, 3.4, 4.0],
[0.2, 0.4, 0.6, -0.3, -0.4],
[1.0, 1.0, 1.0, 1.0, 1.0],
]
)
self.transitions_from_start = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.6])
self.transitions_to_end = torch.Tensor([-0.1, -0.2, 0.3, -0.4, -0.4])
# Use the CRF Module with fixed transitions to compute the log_likelihood
self.crf = ConditionalRandomField(5)
self.crf.transitions = torch.nn.Parameter(self.transitions)
self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
def score(self, logits, tags):
"""
Computes the likelihood score for the given sequence of tags,
given the provided logits (and the transition weights in the CRF model)
"""
# Start with transitions from START and to END
total = self.transitions_from_start[tags[0]] + self.transitions_to_end[tags[-1]]
# Add in all the intermediate transitions
for tag, next_tag in zip(tags, tags[1:]):
total += self.transitions[tag, next_tag]
# Add in the logits for the observed tags
for logit, tag in zip(logits, tags):
total += logit[tag]
return total
def naive_most_likely_sequence(self, logits, mask):
# We iterate over all possible tag sequences and use self.score
# to check the likelihood of each. The most likely sequence should be the
# same as what we get from viterbi_tags.
most_likely_tags = []
best_scores = []
for logit, mas in zip(logits, mask):
mask_indices = mas.nonzero(as_tuple=False).squeeze()
logit = torch.index_select(logit, 0, mask_indices)
sequence_length = logit.shape[0]
most_likely, most_likelihood = None, -float("inf")
for tags in itertools.product(range(5), repeat=sequence_length):
score = self.score(logit.data, tags)
if score > most_likelihood:
most_likely, most_likelihood = tags, score
# Convert tuple to list; otherwise == complains.
most_likely_tags.append(list(most_likely))
best_scores.append(most_likelihood)
return most_likely_tags, best_scores
def test_forward_works_without_mask(self):
log_likelihood = self.crf(self.logits, self.tags).item()
# Now compute the log-likelihood manually
manual_log_likelihood = 0.0
# For each instance, manually compute the numerator
# (which is just the score for the logits and actual tags)
# and the denominator
# (which is the log-sum-exp of the scores for the logits across all possible tags)
for logits_i, tags_i in zip(self.logits, self.tags):
numerator = self.score(logits_i.detach(), tags_i.detach())
all_scores = [
self.score(logits_i.detach(), tags_j)
for tags_j in itertools.product(range(5), repeat=3)
]
denominator = math.log(sum(math.exp(score) for score in all_scores))
# And include them in the manual calculation.
manual_log_likelihood += numerator - denominator
# The manually computed log likelihood should equal the result of crf.forward.
assert manual_log_likelihood.item() == approx(log_likelihood)
def test_forward_works_with_mask(self):
# Use a non-trivial mask
mask = torch.tensor([[True, True, True], [True, True, False]])
log_likelihood = self.crf(self.logits, self.tags, mask).item()
# Now compute the log-likelihood manually
manual_log_likelihood = 0.0
# For each instance, manually compute the numerator
# (which is just the score for the logits and actual tags)
# and the denominator
# (which is the log-sum-exp of the scores for the logits across all possible tags)
for logits_i, tags_i, mask_i in zip(self.logits, self.tags, mask):
# Find the sequence length for this input and only look at that much of each sequence.
sequence_length = torch.sum(mask_i.detach())
logits_i = logits_i.data[:sequence_length]
tags_i = tags_i.data[:sequence_length]
numerator = self.score(logits_i, tags_i)
all_scores = [
self.score(logits_i, tags_j)
for tags_j in itertools.product(range(5), repeat=sequence_length)
]
denominator = math.log(sum(math.exp(score) for score in all_scores))
# And include them in the manual calculation.
manual_log_likelihood += numerator - denominator
# The manually computed log likelihood should equal the result of crf.forward.
assert manual_log_likelihood.item() == approx(log_likelihood)
def test_viterbi_tags(self):
mask = torch.tensor([[True, True, True], [True, False, True]])
viterbi_path = self.crf.viterbi_tags(self.logits, mask)
# Separate the tags and scores.
viterbi_tags = [x for x, y in viterbi_path]
viterbi_scores = [y for x, y in viterbi_path]
most_likely_tags, best_scores = self.naive_most_likely_sequence(self.logits, mask)
assert viterbi_tags == most_likely_tags
assert_allclose(viterbi_scores, best_scores, rtol=1e-5)
def test_viterbi_tags_no_mask(self):
viterbi_path = self.crf.viterbi_tags(self.logits)
# Separate the tags and scores.
viterbi_tags = [x for x, y in viterbi_path]
viterbi_scores = [y for x, y in viterbi_path]
mask = torch.tensor([[True, True, True], [True, True, True]])
most_likely_tags, best_scores = self.naive_most_likely_sequence(self.logits, mask)
assert viterbi_tags == most_likely_tags
assert_allclose(viterbi_scores, best_scores, rtol=1e-5)
def test_viterbi_tags_top_k(self):
mask = torch.tensor([[True, True, True], [True, True, False]])
best_paths = self.crf.viterbi_tags(self.logits, mask, top_k=2)
# Ensure the top path matches not passing top_k
top_path_and_score = [top_k_paths[0] for top_k_paths in best_paths]
assert top_path_and_score == self.crf.viterbi_tags(self.logits, mask)
next_path_and_score = [top_k_paths[1] for top_k_paths in best_paths]
next_viterbi_tags = [x for x, _ in next_path_and_score]
# Check that the next best viterbi tags are what I think they should be.
assert next_viterbi_tags == [[4, 2, 3], [3, 2]]
def test_constrained_viterbi_tags(self):
constraints = {
(0, 0),
(0, 1),
(1, 1),
(1, 2),
(2, 2),
(2, 3),
(3, 3),
(3, 4),
(4, 4),
(4, 0),
}
# Add the transitions to the end tag
# and from the start tag.
for i in range(5):
constraints.add((5, i))
constraints.add((i, 6))
crf = ConditionalRandomField(num_tags=5, constraints=constraints)
crf.transitions = torch.nn.Parameter(self.transitions)
crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
mask = torch.tensor([[True, True, True], [True, True, False]])
viterbi_path = crf.viterbi_tags(self.logits, mask)
# Get just the tags from each tuple of (tags, score).
viterbi_tags = [x for x, y in viterbi_path]
# Now the tags should respect the constraints
assert viterbi_tags == [[2, 3, 3], [2, 3]]
def test_allowed_transitions(self):
        bio_labels = ["O", "B-X", "I-X", "B-Y", "I-Y"]
        # label indices: 0, 1, 2, 3, 4 as listed above; start tag = 5, end tag = 6
allowed = allowed_transitions("BIO", dict(enumerate(bio_labels)))
# The empty spaces in this matrix indicate disallowed transitions.
assert set(allowed) == { # Extra column for end tag.
(0, 0),
(0, 1),
(0, 3),
(0, 6),
(1, 0),
(1, 1),
(1, 2),
(1, 3),
(1, 6),
(2, 0),
(2, 1),
(2, 2),
(2, 3),
(2, 6),
(3, 0),
(3, 1),
(3, 3),
(3, 4),
(3, 6),
(4, 0),
(4, 1),
(4, 3),
(4, 4),
(4, 6),
(5, 0),
(5, 1),
(5, 3), # Extra row for start tag
}
bioul_labels = [
"O",
"B-X",
"I-X",
"L-X",
"U-X",
"B-Y",
"I-Y",
"L-Y",
"U-Y",
        ]
        # label indices: 0-8 as listed above; start tag = 9, end tag = 10
allowed = allowed_transitions("BIOUL", dict(enumerate(bioul_labels)))
# The empty spaces in this matrix indicate disallowed transitions.
assert set(allowed) == { # Extra column for end tag.
(0, 0),
(0, 1),
(0, 4),
(0, 5),
(0, 8),
(0, 10),
(1, 2),
(1, 3), # noqa
(2, 2),
(2, 3),
(3, 0),
(3, 1),
(3, 4),
(3, 5),
(3, 8),
(3, 10),
(4, 0),
(4, 1),
(4, 4),
(4, 5),
(4, 8),
(4, 10),
(5, 6),
(5, 7),
(6, 6),
(6, 7),
(7, 0),
(7, 1),
(7, 4),
(7, 5),
(7, 8),
(7, 10),
(8, 0),
(8, 1),
(8, 4),
(8, 5),
(8, 8),
(8, 10),
# Extra row for start tag.
(9, 0),
(9, 1),
(9, 4),
(9, 5),
(9, 8),
}
        iob1_labels = ["O", "B-X", "I-X", "B-Y", "I-Y"]
        # label indices: 0, 1, 2, 3, 4 as listed above; start tag = 5, end tag = 6
allowed = allowed_transitions("IOB1", dict(enumerate(iob1_labels)))
# The empty spaces in this matrix indicate disallowed transitions.
assert set(allowed) == { # Extra column for end tag.
(0, 0),
(0, 2),
(0, 4),
(0, 6),
(1, 0),
(1, 1),
(1, 2),
(1, 4),
(1, 6),
(2, 0),
(2, 1),
(2, 2),
(2, 4),
(2, 6),
(3, 0),
(3, 2),
(3, 3),
(3, 4),
(3, 6),
(4, 0),
(4, 2),
(4, 3),
(4, 4),
(4, 6),
(5, 0),
(5, 2),
(5, 4), # Extra row for start tag
}
with raises(ConfigurationError):
allowed_transitions("allennlp", {})
        bmes_labels = ["B-X", "M-X", "E-X", "S-X", "B-Y", "M-Y", "E-Y", "S-Y"]
        # label indices: 0-7 as listed above; start tag = 8, end tag = 9
allowed = allowed_transitions("BMES", dict(enumerate(bmes_labels)))
assert set(allowed) == {
(0, 1),
(0, 2),
(1, 1),
(1, 2), # Extra column for end tag.
(2, 0),
(2, 3),
(2, 4),
(2, 7),
(2, 9), # noqa
(3, 0),
(3, 3),
(3, 4),
(3, 7),
(3, 9),
(4, 5),
(4, 6),
(5, 5),
(5, 6),
(6, 0),
(6, 3),
(6, 4),
(6, 7),
(6, 9),
(7, 0),
(7, 3),
(7, 4),
(7, 7),
(7, 9),
(8, 0),
(8, 3),
(8, 4),
(8, 7), # Extra row for start tag
}
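# Hedged note on the index convention (an editorial addition).
# In every set above, indices 0 .. len(labels) - 1 are the labels themselves,
# len(labels) is the synthetic start tag and len(labels) + 1 the end tag, which is
# why the "extra row / extra column" entries appear. The start tag is never a
# destination and the end tag is never a source.
if __name__ == "__main__":
    labels = dict(enumerate(["O", "B-X", "I-X", "B-Y", "I-Y"]))
    start_tag, end_tag = len(labels), len(labels) + 1
    for from_idx, to_idx in allowed_transitions("BIO", labels):
        assert to_idx != start_tag and from_idx != end_tag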
| allennlp-master | tests/modules/conditional_random_field_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder
class TestGatedCnnEncoder(AllenNlpTestCase):
def test_gated_cnn_encoder(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32,
layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
def test_gated_cnn_encoder_dilations(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32, layers=[[[2, 32, 1]], [[2, 32, 2]], [[2, 32, 4]], [[2, 32, 8]]]
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
def test_gated_cnn_encoder_layers(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32,
layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],
return_all_layers=True,
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert len(output) == 3
concat_layers = torch.cat([layer.unsqueeze(1) for layer in output], dim=1)
assert list(concat_layers.size()) == [5, 3, 10, 64]
| allennlp-master | tests/modules/seq2seq_encoders/gated_cnn_encoder_test.py |
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import PassThroughEncoder
class TestPassThroughEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = PassThroughEncoder(input_dim=9)
assert encoder.get_input_dim() == 9
assert encoder.get_output_dim() == 9
def test_pass_through_encoder_passes_through(self):
encoder = PassThroughEncoder(input_dim=9)
tensor = torch.randn([2, 3, 9])
output = encoder(tensor)
numpy.testing.assert_array_almost_equal(
tensor.detach().cpu().numpy(), output.detach().cpu().numpy()
)
def test_pass_through_encoder_with_mask(self):
encoder = PassThroughEncoder(input_dim=9)
tensor = torch.randn([2, 3, 9])
mask = torch.tensor([[True, True, True], [True, False, False]])
output = encoder(tensor, mask)
target = tensor * mask.unsqueeze(dim=-1).float()
numpy.testing.assert_array_almost_equal(
output.detach().cpu().numpy(), target.detach().cpu().numpy()
)
| allennlp-master | tests/modules/seq2seq_encoders/pass_through_encoder_test.py |
import numpy
from numpy.testing import assert_almost_equal
import pytest
import torch
from torch.nn import LSTM, GRU
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
class TestPytorchSeq2SeqWrapper(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
assert encoder.get_output_dim() == 14
assert encoder.get_input_dim() == 2
lstm = LSTM(
bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
)
encoder = PytorchSeq2SeqWrapper(lstm)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 2
def test_forward_works_even_with_empty_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[2, :, :] = 0
tensor[3, 2:, :] = 0
tensor[4, :, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False
mask[3, 2:] = False
mask[4, :] = False
results = encoder(tensor, mask)
for i in (0, 1, 3):
assert not (results[i] == 0.0).data.all()
for i in (2, 4):
assert (results[i] == 0.0).data.all()
def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
lstm_output = lstm(input_tensor)
encoder_output = encoder(input_tensor, None)
assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy())
def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[1, 6:, :] = 0
input_tensor[2, 4:, :] = 0
input_tensor[3, 2:, :] = 0
input_tensor[4, 1:, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, 4:] = False
mask[3, 2:] = False
mask[4, 1:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
packed_sequence = pack_padded_sequence(
input_tensor, sequence_lengths.data.tolist(), batch_first=True
)
lstm_output, _ = lstm(packed_sequence)
encoder_output = encoder(input_tensor, mask)
lstm_tensor, _ = pad_packed_sequence(lstm_output, batch_first=True)
assert_almost_equal(encoder_output.data.numpy(), lstm_tensor.data.numpy())
def test_forward_pulls_out_correct_tensor_for_unsorted_batches(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[0, 3:, :] = 0
input_tensor[1, 4:, :] = 0
input_tensor[2, 2:, :] = 0
input_tensor[3, 6:, :] = 0
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 6:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
input_tensor, sequence_lengths
)
packed_sequence = pack_padded_sequence(
sorted_inputs, sorted_sequence_lengths.data.tolist(), batch_first=True
)
lstm_output, _ = lstm(packed_sequence)
encoder_output = encoder(input_tensor, mask)
lstm_tensor, _ = pad_packed_sequence(lstm_output, batch_first=True)
assert_almost_equal(
encoder_output.data.numpy(),
lstm_tensor.index_select(0, restoration_indices).data.numpy(),
)
def test_forward_does_not_compress_tensors_padded_to_greater_than_the_max_sequence_length(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 8, 3])
input_tensor[:, 7, :] = 0
mask = torch.ones(5, 8).bool()
mask[:, 7] = False
encoder_output = encoder(input_tensor, mask)
assert encoder_output.size(1) == 8
def test_wrapper_raises_if_batch_first_is_false(self):
with pytest.raises(ConfigurationError):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
_ = PytorchSeq2SeqWrapper(lstm)
def test_wrapper_works_when_passed_state_with_zero_length_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 0:] = False
mask[3, 6:] = False
# Initial states are of shape (num_layers * num_directions, batch_size, hidden_dim)
initial_states = torch.randn(6, 5, 7), torch.randn(6, 5, 7)
_ = encoder(input_tensor, mask, initial_states)
def test_wrapper_can_call_backward_with_zero_length_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
        mask[2, 0:] = False  # zero-length sequence
mask[3, 6:] = False
output = encoder(input_tensor, mask)
output.sum().backward()
def test_wrapper_stateful(self):
lstm = LSTM(bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm, stateful=True)
# To test the stateful functionality we need to call the encoder multiple times.
# Different batch sizes further tests some of the logic.
batch_sizes = [5, 10, 8]
sequence_lengths = [4, 6, 7]
states = []
for batch_size, sequence_length in zip(batch_sizes, sequence_lengths):
tensor = torch.rand([batch_size, sequence_length, 3])
mask = torch.ones(batch_size, sequence_length).bool()
            mask[0, 3:] = False
encoder_output = encoder(tensor, mask)
states.append(encoder._states)
# Check that the output is masked properly.
assert_almost_equal(encoder_output[0, 3:, :].data.numpy(), numpy.zeros((4, 14)))
for k in range(2):
assert_almost_equal(
states[-1][k][:, -2:, :].data.numpy(), states[-2][k][:, -2:, :].data.numpy()
)
def test_wrapper_stateful_single_state_gru(self):
gru = GRU(bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(gru, stateful=True)
batch_sizes = [10, 5]
states = []
for batch_size in batch_sizes:
tensor = torch.rand([batch_size, 5, 3])
mask = torch.ones(batch_size, 5).bool()
            mask[0, 3:] = False
encoder_output = encoder(tensor, mask)
states.append(encoder._states)
assert_almost_equal(encoder_output[0, 3:, :].data.numpy(), numpy.zeros((2, 14)))
assert_almost_equal(
states[-1][0][:, -5:, :].data.numpy(), states[-2][0][:, -5:, :].data.numpy()
)
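# Hedged usage sketch (an editorial addition, not part of the original tests).
# What "stateful" buys you in practice: the wrapper keeps the final hidden state
# of one forward call around as the initial state of the next, and reset_states()
# clears that cache, e.g. between unrelated documents.
if __name__ == "__main__":
    sketch_lstm = LSTM(
        bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True
    )
    sketch_encoder = PytorchSeq2SeqWrapper(sketch_lstm, stateful=True)
    sketch_mask = torch.ones(4, 5).bool()
    sketch_encoder(torch.rand(4, 5, 3), sketch_mask)
    assert sketch_encoder._states is not None  # carried over to the next call
    sketch_encoder.reset_states()
    assert sketch_encoder._states is None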
| allennlp-master | tests/modules/seq2seq_encoders/pytorch_seq2seq_wrapper_test.py |
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import FeedForward
from allennlp.modules.seq2seq_encoders.feedforward_encoder import FeedForwardEncoder
from allennlp.nn import Activation
class TestFeedforwardEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
feedforward = FeedForward(
input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
)
encoder = FeedForwardEncoder(feedforward)
assert encoder.get_input_dim() == feedforward.get_input_dim()
assert encoder.get_output_dim() == feedforward.get_output_dim()
def test_feedforward_encoder_exactly_match_feedforward_each_item(self):
feedforward = FeedForward(
input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
)
encoder = FeedForwardEncoder(feedforward)
tensor = torch.randn([2, 3, 10])
output = encoder(tensor)
target = feedforward(tensor)
numpy.testing.assert_array_almost_equal(
target.detach().cpu().numpy(), output.detach().cpu().numpy()
)
# mask should work
mask = torch.tensor([[True, True, True], [True, False, False]])
output = encoder(tensor, mask)
target = feedforward(tensor) * mask.unsqueeze(dim=-1).float()
numpy.testing.assert_array_almost_equal(
target.detach().cpu().numpy(), output.detach().cpu().numpy()
)
| allennlp-master | tests/modules/seq2seq_encoders/feedforward_encoder_test.py |
allennlp-master | tests/modules/seq2seq_encoders/__init__.py |
|
from typing import Optional
import torch
import pytest
from allennlp.modules.seq2seq_encoders import PytorchTransformer
@pytest.mark.parametrize("positional_encoding", [None, "sinusoidal", "embedding"])
def test_positional_embeddings(positional_encoding: Optional[str]):
# All sizes are prime, making them easy to find during debugging.
batch_size = 7
max_seq_len = 101
n_head = 5
dims = 11 * n_head
transformer = PytorchTransformer(
dims, 3, positional_encoding=positional_encoding, num_attention_heads=n_head
)
transformer.eval()
with torch.no_grad():
inputs = torch.randn(batch_size, max_seq_len, dims)
mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
for b in range(batch_size):
mask[b, max_seq_len - b :] = False
assert not torch.isnan(inputs).any()
assert torch.isfinite(inputs).all()
outputs = transformer(inputs, mask)
assert outputs.size() == inputs.size()
assert not torch.isnan(outputs).any()
assert torch.isfinite(outputs).all()
@pytest.mark.parametrize("positional_encoding", [None, "sinusoidal", "embedding"])
def test_mask_works(positional_encoding: Optional[str]):
# All sizes are prime, making them easy to find during debugging.
batch_size = 3
max_seq_len = 11
n_head = 2
dims = 7 * n_head
transformer = PytorchTransformer(
dims, 2, positional_encoding=positional_encoding, num_attention_heads=n_head
)
transformer.eval()
with torch.no_grad():
# Construct inputs and masks
inputs = torch.randn(batch_size, max_seq_len, dims)
all_ones_mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
mask = all_ones_mask.clone()
for b in range(batch_size):
mask[b, max_seq_len - b :] = False
altered_inputs = inputs + (~mask).unsqueeze(2) * 10.0
# Make sure there is a difference without the mask
assert not torch.allclose(
transformer(inputs, all_ones_mask), transformer(altered_inputs, all_ones_mask)
)
# Make sure there is no difference with the mask
assert torch.allclose(
torch.masked_select(transformer(inputs, mask), mask.unsqueeze(2)),
torch.masked_select(transformer(altered_inputs, mask), mask.unsqueeze(2)),
)
@pytest.mark.parametrize("positional_encoding", [None, "sinusoidal", "embedding"])
def test_positional_encodings(positional_encoding: Optional[str]):
# All sizes are prime, making them easy to find during debugging.
batch_size = 3
max_seq_len = 11
n_head = 2
dims = 7 * n_head
transformer = PytorchTransformer(
dims, 2, positional_encoding=positional_encoding, num_attention_heads=n_head
)
transformer.eval()
with torch.no_grad():
# We test this by running it twice, once with a shuffled sequence. The results should be the same if there
# is no positional encoding, and different otherwise.
inputs = torch.randn(batch_size, max_seq_len, dims)
mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
for b in range(batch_size):
mask[b, max_seq_len - b :] = False
unshuffled_output = transformer(inputs, mask)
shuffle = torch.arange(0, max_seq_len).unsqueeze(0).expand_as(mask).clone()
for b in range(batch_size):
# Take care not to shuffle the masked values
perm = torch.randperm(max_seq_len - b)
shuffle[b, : max_seq_len - b] = shuffle[b, perm]
shuffle = shuffle.unsqueeze(2).expand_as(inputs)
shuffled_input = torch.gather(inputs, 1, shuffle)
shuffled_output = transformer(shuffled_input, mask)
if positional_encoding is None:
assert torch.allclose(
torch.gather(unshuffled_output, 1, shuffle), shuffled_output, atol=2e-7
)
else:
assert not torch.allclose(
torch.gather(unshuffled_output, 1, shuffle), shuffled_output, atol=2e-7
)
| allennlp-master | tests/modules/seq2seq_encoders/pytorch_transformer_wrapper_test.py |
import torch
import numpy
from overrides import overrides
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import ComposeEncoder, FeedForwardEncoder, Seq2SeqEncoder
from allennlp.modules import FeedForward
class MockSeq2SeqEncoder(Seq2SeqEncoder):
def __init__(self, input_dim: int, output_dim: int, bidirectional: bool = False):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.bidirectional = bidirectional
@overrides
def forward(self, inputs, mask):
pass
@overrides
def get_input_dim(self) -> int:
return self.input_dim
@overrides
def get_output_dim(self) -> int:
return self.output_dim
@overrides
def is_bidirectional(self) -> bool:
return self.bidirectional
def _make_feedforward(input_dim, output_dim):
return FeedForwardEncoder(
FeedForward(
input_dim=input_dim, num_layers=1, activations=torch.nn.ReLU(), hidden_dims=output_dim
)
)
class TestPassThroughEncoder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.encoder = ComposeEncoder(
[_make_feedforward(9, 5), _make_feedforward(5, 10), _make_feedforward(10, 3)]
)
def test_get_dimension_is_correct(self):
assert self.encoder.get_input_dim() == 9
assert self.encoder.get_output_dim() == 3
def test_composes(self):
tensor = torch.zeros(2, 10, 9)
output = self.encoder(tensor)
for encoder in self.encoder.encoders:
tensor = encoder(tensor)
numpy.testing.assert_array_almost_equal(
output.detach().cpu().numpy(), tensor.detach().cpu().numpy()
)
def test_pass_through_encoder_with_mask(self):
tensor = torch.randn([2, 3, 9])
mask = torch.tensor([[True, True, True], [True, False, False]])
output = self.encoder(tensor, mask)
for encoder in self.encoder.encoders:
tensor = encoder(tensor, mask)
numpy.testing.assert_array_almost_equal(
output.detach().cpu().numpy(), tensor.detach().cpu().numpy()
)
def test_empty(self):
with pytest.raises(ValueError):
ComposeEncoder([])
def test_mismatched_size(self):
with pytest.raises(ValueError):
ComposeEncoder(
[
MockSeq2SeqEncoder(input_dim=9, output_dim=5),
MockSeq2SeqEncoder(input_dim=1, output_dim=2),
]
)
def test_mismatched_bidirectionality(self):
with pytest.raises(ValueError):
ComposeEncoder(
[
MockSeq2SeqEncoder(input_dim=9, output_dim=5),
MockSeq2SeqEncoder(input_dim=5, output_dim=2, bidirectional=True),
]
)
def test_all_bidirectional(self):
ComposeEncoder(
[
MockSeq2SeqEncoder(input_dim=9, output_dim=5, bidirectional=True),
MockSeq2SeqEncoder(input_dim=5, output_dim=2, bidirectional=True),
]
)
| allennlp-master | tests/modules/seq2seq_encoders/compose_encoder_test.py |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention.attention import Attention
from allennlp.modules.attention.dot_product_attention import DotProductAttention
class TestDotProductAttention(AllenNlpTestCase):
def test_can_init_dot(self):
legacy_attention = Attention.from_params(Params({"type": "dot_product"}))
isinstance(legacy_attention, DotProductAttention)
def test_dot_product_similarity(self):
linear = DotProductAttention(normalize=False)
output = linear(
torch.FloatTensor([[0, 0, 0], [1, 1, 1]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
assert_almost_equal(output.numpy(), numpy.array([[0.0, 0.0], [24.0, 33.0]]), decimal=2)
| allennlp-master | tests/modules/attention/dot_product_attention_test.py |
allennlp-master | tests/modules/attention/__init__.py |
|
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.attention import BilinearAttention
from allennlp.common.testing import AllenNlpTestCase
class TestBilinearAttention(AllenNlpTestCase):
def test_forward_does_a_bilinear_product(self):
params = Params({"vector_dim": 2, "matrix_dim": 2, "normalize": False})
bilinear = BilinearAttention.from_params(params)
bilinear._weight_matrix = Parameter(torch.FloatTensor([[-0.3, 0.5], [2.0, -1.0]]))
bilinear._bias = Parameter(torch.FloatTensor([0.1]))
a_vectors = torch.FloatTensor([[1, 1]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2)
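        # (Editorial note) A hand check, assuming the standard bilinear form
        # x^T W y + bias: [1, 1] W = [1.7, -0.5], so the scores against [1, 0] and
        # [0, 1] are 1.7 + 0.1 = 1.8 and -0.5 + 0.1 = -0.4.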
assert_almost_equal(result, [[1.8, -0.4]])
| allennlp-master | tests/modules/attention/bilinear_attention_test.py |
from numpy.testing import assert_almost_equal
import numpy
import torch
from torch.autograd import Variable
from torch.nn import Parameter
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention import LinearAttention
from allennlp.modules.attention.attention import Attention
class LinearAttentionTest(AllenNlpTestCase):
def test_can_init_linear(self):
legacy_attention = Attention.from_params(
Params({"type": "linear", "tensor_1_dim": 3, "tensor_2_dim": 3})
)
isinstance(legacy_attention, LinearAttention)
def test_linear_similarity(self):
linear = LinearAttention(3, 3, normalize=True)
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.1]))
output = linear(
Variable(torch.FloatTensor([[-7, -8, -9]])),
Variable(torch.FloatTensor([[[1, 2, 3], [4, 5, 6]]])),
)
assert_almost_equal(output.data.numpy(), numpy.array([[0.0474, 0.9526]]), decimal=2)
def test_bidaf_trilinear_similarity(self):
linear = LinearAttention(2, 2, combination="x,y,x*y", normalize=False)
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.0]))
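        # (Editorial note) With combination "x,y,x*y" the score is w · [x; y; x*y] + bias,
        # so each expected value below decomposes into (-0.3, 0.5) · x, (2, -1) · y and
        # (1, 1) · (x * y) for x = [4, 5] and each row y of the matrix.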
output = linear(
torch.FloatTensor([[4, 5]]), torch.FloatTensor([[[1, 2], [4, 5], [7, 8], [10, 11]]])
)
assert_almost_equal(
output.data.numpy(),
numpy.array(
[
[
-1.2 + 2.5 + 2 + -2 + 4 + 10,
-1.2 + 2.5 + 8 + -5 + 16 + 25,
-1.2 + 2.5 + 14 + -8 + 28 + 40,
-1.2 + 2.5 + 20 + -11 + 40 + 55,
]
]
),
decimal=2,
)
| allennlp-master | tests/modules/attention/linear_attention_test.py |
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.attention import AdditiveAttention
from allennlp.common.testing import AllenNlpTestCase
class TestAdditiveAttention(AllenNlpTestCase):
def test_forward_does_an_additive_product(self):
params = Params({"vector_dim": 2, "matrix_dim": 3, "normalize": False})
additive = AdditiveAttention.from_params(params)
additive._w_matrix = Parameter(torch.Tensor([[-0.2, 0.3], [-0.5, 0.5]]))
additive._u_matrix = Parameter(torch.Tensor([[0.0, 1.0], [1.0, 1.0], [1.0, -1.0]]))
additive._v_vector = Parameter(torch.Tensor([[1.0], [-1.0]]))
vectors = torch.FloatTensor([[0.7, -0.8], [0.4, 0.9]])
matrices = torch.FloatTensor(
[
[[1.0, -1.0, 3.0], [0.5, -0.3, 0.0], [0.2, -1.0, 1.0], [0.7, 0.8, -1.0]],
[[-2.0, 3.0, -3.0], [0.6, 0.2, 2.0], [0.5, -0.4, -1.0], [0.2, 0.2, 0.0]],
]
)
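        # (Editorial note) A hand check of the first expected value, assuming the
        # Bahdanau-style score v^T tanh(q W + k U): for q = [0.7, -0.8] and
        # k = [1, -1, 3], q W = [0.26, -0.19] and k U = [2, -3], so tanh of the sum is
        # roughly [0.9785, -0.9966] and the dot product with v = [1, -1] gives ~1.9751.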
result = additive(vectors, matrices).detach().numpy()
assert result.shape == (2, 4)
assert_almost_equal(
result,
[
[1.975072, -0.04997836, 1.2176098, -0.9205586],
[-1.4851665, 1.489604, -1.890285, -1.0672251],
],
)
| allennlp-master | tests/modules/attention/additive_attention_test.py |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention.attention import Attention
from allennlp.modules.attention.cosine_attention import CosineAttention
class TestCosineAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = Attention.from_params(Params({"type": "cosine"}))
isinstance(legacy_attention, CosineAttention)
def test_cosine_similarity(self):
linear = CosineAttention(normalize=False)
output = linear(
torch.FloatTensor([[0, 0, 0], [1, 1, 1]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
assert_almost_equal(output.numpy(), numpy.array([[0.0, 0.0], [0.9948, 0.9973]]), decimal=2)
| allennlp-master | tests/modules/attention/cosine_attention_test.py |
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
params = Params(
{
"token_embedders": {
"words1": {"type": "embedding", "embedding_dim": 2},
"words2": {"type": "embedding", "embedding_dim": 5},
"words3": {"type": "embedding", "embedding_dim": 3},
}
}
)
self.token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
self.inputs = {
"words1": {"tokens": torch.LongTensor([[0, 2, 3, 5]])},
"words2": {"tokens": torch.LongTensor([[1, 4, 3, 2]])},
"words3": {"tokens": torch.LongTensor([[1, 5, 1, 2]])},
}
def test_get_output_dim_aggregates_dimension_from_each_embedding(self):
assert self.token_embedder.get_output_dim() == 10
def test_forward_asserts_input_field_match(self):
# Total mismatch
self.inputs["words4"] = self.inputs["words3"]
del self.inputs["words3"]
with pytest.raises(ConfigurationError) as exc:
self.token_embedder(self.inputs)
assert exc.match("Mismatched token keys")
self.inputs["words3"] = self.inputs["words4"]
# Text field has too many inputs
with pytest.raises(ConfigurationError) as exc:
self.token_embedder(self.inputs)
assert exc.match("Mismatched token keys")
del self.inputs["words4"]
def test_forward_concats_resultant_embeddings(self):
assert self.token_embedder(self.inputs).size() == (1, 4, 10)
def test_forward_works_on_higher_order_input(self):
params = Params(
{
"token_embedders": {
"words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
"characters": {
"type": "character_encoding",
"embedding": {"embedding_dim": 4, "num_embeddings": 15},
"encoder": {
"type": "cnn",
"embedding_dim": 4,
"num_filters": 10,
"ngram_filter_sizes": [3],
},
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"words": {"tokens": (torch.rand(3, 4, 5, 6) * 20).long()},
"characters": {"token_characters": (torch.rand(3, 4, 5, 6, 7) * 15).long()},
}
assert token_embedder(inputs, num_wrapping_dims=2).size() == (3, 4, 5, 6, 12)
def test_forward_runs_with_forward_params(self):
class FakeEmbedder(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, tokens: torch.Tensor, extra_arg: int = None):
assert tokens is not None
assert extra_arg is not None
return tokens
token_embedder = BasicTextFieldEmbedder({"elmo": FakeEmbedder()})
inputs = {"elmo": {"elmo_tokens": (torch.rand(3, 6, 5) * 2).long()}}
kwargs = {"extra_arg": 1}
token_embedder(inputs, **kwargs)
def test_forward_runs_with_non_bijective_mapping(self):
elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
options_file = str(elmo_fixtures_path / "options.json")
weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
params = Params(
{
"token_embedders": {
"words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
"elmo": {
"type": "elmo_token_embedder",
"options_file": options_file,
"weight_file": weight_file,
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"words": {"tokens": (torch.rand(3, 6) * 20).long()},
"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()},
}
token_embedder(inputs)
def test_forward_runs_with_non_bijective_mapping_with_null(self):
elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
options_file = str(elmo_fixtures_path / "options.json")
weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
params = Params(
{
"token_embedders": {
"elmo": {
"type": "elmo_token_embedder",
"options_file": options_file,
"weight_file": weight_file,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()}}
token_embedder(inputs)
def test_forward_runs_with_non_bijective_mapping_with_dict(self):
elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
options_file = str(elmo_fixtures_path / "options.json")
weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
params = Params(
{
"token_embedders": {
"words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
"elmo": {
"type": "elmo_token_embedder",
"options_file": options_file,
"weight_file": weight_file,
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"words": {"tokens": (torch.rand(3, 6) * 20).long()},
"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()},
}
token_embedder(inputs)
def test_forward_runs_with_bijective_and_non_bijective_mapping(self):
params = Params(
{
"token_embedders": {
"bert": {"type": "pretrained_transformer", "model_name": "bert-base-uncased"},
"token_characters": {
"type": "character_encoding",
"embedding": {"embedding_dim": 5},
"encoder": {
"type": "cnn",
"embedding_dim": 5,
"num_filters": 5,
"ngram_filter_sizes": [5],
},
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"bert": {
"token_ids": (torch.rand(3, 5) * 10).long(),
"mask": (torch.rand(3, 5) * 1).bool(),
},
"token_characters": {"token_characters": (torch.rand(3, 5, 5) * 1).long()},
}
token_embedder(inputs)
| allennlp-master | tests/modules/text_field_embedders/basic_text_field_embedder_test.py |
allennlp-master | tests/modules/text_field_embedders/__init__.py |
|
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.common.testing import AllenNlpTestCase
class TestCnnEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = CnnEncoder(embedding_dim=5, num_filters=4, ngram_filter_sizes=(3, 5))
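        # Without an explicit output_dim, the max-pooled features from each filter size
        # are concatenated, so the output is num_filters * len(ngram_filter_sizes) = 4 * 2 = 8.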
assert encoder.get_output_dim() == 8
assert encoder.get_input_dim() == 5
encoder = CnnEncoder(
embedding_dim=5, num_filters=4, ngram_filter_sizes=(3, 5), output_dim=7
)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 5
def test_can_construct_from_params(self):
params = Params({"embedding_dim": 5, "num_filters": 4, "ngram_filter_sizes": [3, 5]})
encoder = CnnEncoder.from_params(params)
assert encoder.get_output_dim() == 8
params = Params(
{"embedding_dim": 5, "num_filters": 4, "ngram_filter_sizes": [3, 5], "output_dim": 7}
)
encoder = CnnEncoder.from_params(params)
assert encoder.get_output_dim() == 7
def test_forward_does_correct_computation(self):
encoder = CnnEncoder(embedding_dim=2, num_filters=1, ngram_filter_sizes=(1, 2))
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(encoder)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
encoder_output = encoder(input_tensor, None)
assert_almost_equal(
encoder_output.data.numpy(), numpy.asarray([[1.6 + 1.0, 3.1 + 1.0]]), decimal=6
)
def test_forward_runs_with_larger_input(self):
encoder = CnnEncoder(
embedding_dim=7, num_filters=13, ngram_filter_sizes=(1, 2, 3, 4, 5), output_dim=30
)
tensor = torch.rand(4, 8, 7)
assert encoder(tensor, None).size() == (4, 30)
def test_forward_respects_masking(self):
# seed 1 fails on the old cnn encoder code
torch.manual_seed(1)
encoder = CnnEncoder(embedding_dim=7, num_filters=13, ngram_filter_sizes=(1, 2, 3, 4, 5))
init = Initializer.from_params(Params({"type": "normal", "mean": 0.0, "std": 10}))
initializer = InitializerApplicator([(".*", init)])
initializer(encoder)
tokens = torch.ones(4, 8, 7)
padded_tokens = torch.nn.functional.pad(tokens.transpose(1, 2), (0, 2), value=5).transpose(
1, 2
)
mask = (
torch.where(
padded_tokens == 5, torch.zeros_like(padded_tokens), torch.ones_like(padded_tokens)
)
.bool()
.any(dim=2)
)
regular_output = encoder.forward(tokens=tokens, mask=None)
masked_output = encoder.forward(tokens=padded_tokens, mask=mask)
assert_almost_equal(regular_output.data.numpy(), masked_output.data.numpy(), decimal=6)
| allennlp-master | tests/modules/seq2vec_encoders/cnn_encoder_test.py |
import pytest
from numpy.testing import assert_almost_equal
import torch
from torch.nn import LSTM
from torch.nn.utils.rnn import pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
class TestPytorchSeq2VecWrapper(AllenNlpTestCase):
def test_get_dimensions_is_correct(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
assert encoder.get_output_dim() == 14
assert encoder.get_input_dim() == 2
lstm = LSTM(
bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
)
encoder = PytorchSeq2VecWrapper(lstm)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 2
def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
lstm_output = lstm(input_tensor)
encoder_output = encoder(input_tensor, None)
assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy()[:, -1, :])
def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[1, 6:, :] = 0
input_tensor[2, 4:, :] = 0
input_tensor[3, 2:, :] = 0
input_tensor[4, 1:, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, 4:] = False
mask[3, 2:] = False
mask[4, 1:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
packed_sequence = pack_padded_sequence(
input_tensor, sequence_lengths.tolist(), batch_first=True
)
_, state = lstm(packed_sequence)
# Transpose output state, extract the last forward and backward states and
# reshape to be of dimension (batch_size, 2 * hidden_size).
reshaped_state = state[0].transpose(0, 1)[:, -2:, :].contiguous()
explicitly_concatenated_state = torch.cat(
[reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
)
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
def test_forward_works_even_with_empty_sequences(self):
lstm = LSTM(
bidirectional=True, num_layers=3, input_size=3, hidden_size=11, batch_first=True
)
encoder = PytorchSeq2VecWrapper(lstm)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[2, :, :] = 0
tensor[3, 2:, :] = 0
tensor[4, :, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False
mask[3, 2:] = False
mask[4, :] = False
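        # Rows 2 and 4 are completely masked, so their encodings should come back as all zeros.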
results = encoder(tensor, mask)
for i in (0, 1, 3):
assert not (results[i] == 0.0).data.all()
for i in (2, 4):
assert (results[i] == 0.0).data.all()
def test_forward_pulls_out_correct_tensor_with_unsorted_batches(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[0, 3:, :] = 0
input_tensor[1, 4:, :] = 0
input_tensor[2, 2:, :] = 0
input_tensor[3, 6:, :] = 0
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 6:] = False
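        # Sort by length, run the LSTM on the packed input, and then restore the original batch order,
        # so we can compare against the wrapper, which must handle the unsorted batch itself.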
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
input_tensor, sequence_lengths
)
packed_sequence = pack_padded_sequence(
sorted_inputs, sorted_sequence_lengths.tolist(), batch_first=True
)
_, state = lstm(packed_sequence)
# Transpose output state, extract the last forward and backward states and
# reshape to be of dimension (batch_size, 2 * hidden_size).
sorted_transposed_state = state[0].transpose(0, 1).index_select(0, restoration_indices)
reshaped_state = sorted_transposed_state[:, -2:, :].contiguous()
explicitly_concatenated_state = torch.cat(
[reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
)
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
def test_wrapper_raises_if_batch_first_is_false(self):
with pytest.raises(ConfigurationError):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
_ = PytorchSeq2VecWrapper(lstm)
def test_wrapper_works_with_alternating_lstm(self):
model = PytorchSeq2VecWrapper(
StackedAlternatingLstm(input_size=4, hidden_size=5, num_layers=3)
)
input_tensor = torch.randn(2, 3, 4)
mask = torch.ones(2, 3).bool()
output = model(input_tensor, mask)
assert tuple(output.size()) == (2, 5)
| allennlp-master | tests/modules/seq2vec_encoders/pytorch_seq2vec_wrapper_test.py |
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders.cls_pooler import ClsPooler
class TestClsPooler(AllenNlpTestCase):
def test_encoder(self):
embedding = torch.rand(5, 50, 7)
encoder = ClsPooler(embedding_dim=7)
pooled = encoder(embedding, mask=None)
assert list(pooled.size()) == [5, 7]
numpy.testing.assert_array_almost_equal(embedding[:, 0], pooled)
def test_cls_at_end(self):
embedding = torch.arange(20).reshape(5, 4).unsqueeze(-1).expand(5, 4, 7)
mask = torch.tensor(
[
[True, True, True, True],
[True, True, True, False],
[True, True, True, True],
[True, False, False, False],
[True, True, False, False],
]
)
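        # embedding[b, t] holds the value 4 * b + t, so these are the values at each row's last unmasked position.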
expected = torch.LongTensor([3, 6, 11, 12, 17]).unsqueeze(-1).expand(5, 7)
encoder = ClsPooler(embedding_dim=7, cls_is_last_token=True)
pooled = encoder(embedding, mask=mask)
assert list(pooled.size()) == [5, 7]
numpy.testing.assert_array_almost_equal(expected, pooled)
| allennlp-master | tests/modules/seq2vec_encoders/cls_pooler_test.py |
allennlp-master | tests/modules/seq2vec_encoders/__init__.py |
|
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfEmbeddingsEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=5)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
encoder = BagOfEmbeddingsEncoder(embedding_dim=12)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_can_construct_from_params(self):
params = Params({"embedding_dim": 5})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
params = Params({"embedding_dim": 12, "averaged": True})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_forward_does_correct_computation(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2)
input_tensor = torch.FloatTensor(
[[[0.7, 0.8], [0.1, 1.5], [0.3, 0.6]], [[0.5, 0.3], [1.4, 1.1], [0.3, 0.9]]]
)
mask = torch.ByteTensor([[1, 1, 1], [1, 1, 0]])
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(
encoder_output.data.numpy(),
numpy.asarray([[0.7 + 0.1 + 0.3, 0.8 + 1.5 + 0.6], [0.5 + 1.4, 0.3 + 1.1]]),
)
def test_forward_does_correct_computation_with_average(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = torch.FloatTensor(
[
[[0.7, 0.8], [0.1, 1.5], [0.3, 0.6]],
[[0.5, 0.3], [1.4, 1.1], [0.3, 0.9]],
[[0.4, 0.3], [0.4, 0.3], [1.4, 1.7]],
]
)
mask = torch.ByteTensor([[1, 1, 1], [1, 1, 0], [0, 0, 0]])
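        # The third row is entirely masked, so its average should come out as all zeros.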
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(
encoder_output.data.numpy(),
numpy.asarray(
[
[(0.7 + 0.1 + 0.3) / 3, (0.8 + 1.5 + 0.6) / 3],
[(0.5 + 1.4) / 2, (0.3 + 1.1) / 2],
[0.0, 0.0],
]
),
)
def test_forward_does_correct_computation_with_average_no_mask(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = torch.FloatTensor(
[[[0.7, 0.8], [0.1, 1.5], [0.3, 0.6]], [[0.5, 0.3], [1.4, 1.1], [0.3, 0.9]]]
)
encoder_output = encoder(input_tensor)
assert_almost_equal(
encoder_output.data.numpy(),
numpy.asarray(
[
[(0.7 + 0.1 + 0.3) / 3, (0.8 + 1.5 + 0.6) / 3],
[(0.5 + 1.4 + 0.3) / 3, (0.3 + 1.1 + 0.9) / 3],
]
),
)
| allennlp-master | tests/modules/seq2vec_encoders/boe_encoder_test.py |
import numpy as np
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders.cnn_highway_encoder import CnnHighwayEncoder
from allennlp.modules.time_distributed import TimeDistributed
class TestCnnHighwayEncoder(AllenNlpTestCase):
def run_encoder_against_random_embeddings(self, do_layer_norm):
encoder = CnnHighwayEncoder(
activation="relu",
embedding_dim=4,
filters=[[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],
num_highway=2,
projection_dim=16,
projection_location="after_cnn",
do_layer_norm=do_layer_norm,
)
encoder = TimeDistributed(encoder)
embedding = torch.from_numpy(np.random.randn(5, 6, 50, 4)).float()
mask = torch.ones(5, 6, 50).bool()
token_embedding = encoder(embedding, mask)
assert list(token_embedding.size()) == [5, 6, 16]
def test_cnn_highway_encoder(self):
self.run_encoder_against_random_embeddings(do_layer_norm=False)
def test_cnn_highway_encoder_with_layer_norm(self):
self.run_encoder_against_random_embeddings(do_layer_norm=True)
| allennlp-master | tests/modules/seq2vec_encoders/cnn_highway_encoder_test.py |
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders import BertPooler
class TestBertPooler(AllenNlpTestCase):
def test_encoder(self):
encoder = BertPooler("bert-base-uncased")
assert encoder.get_input_dim() == encoder.get_output_dim()
embedding = torch.rand(8, 24, encoder.get_input_dim())
pooled1 = encoder(embedding)
assert pooled1.size() == (8, encoder.get_input_dim())
embedding[:, 1:, :] = 0
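        # The pooler only uses the first ([CLS]) token, so zeroing out every other position must not change the output.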
pooled2 = encoder(embedding)
numpy.testing.assert_array_almost_equal(pooled1.detach().numpy(), pooled2.detach().numpy())
| allennlp-master | tests/modules/seq2vec_encoders/bert_pooler_test.py |
import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor
from allennlp.common.params import Params
class TestSelfAttentiveSpanExtractor:
def test_locally_normalised_span_extractor_can_build_from_params(self):
params = Params({"type": "self_attentive", "input_dim": 5})
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, SelfAttentiveSpanExtractor)
def test_attention_is_normalised_correctly(self):
input_dim = 7
sequence_tensor = torch.randn([2, 5, input_dim])
extractor = SelfAttentiveSpanExtractor(input_dim=input_dim)
assert extractor.get_output_dim() == input_dim
assert extractor.get_input_dim() == input_dim
# In order to test the attention, we'll make the weight which computes the logits
# zero, so the attention distribution is uniform over the sentence. This lets
# us check that the computed spans are just the averages of their representations.
extractor._global_attention._module.weight.data.fill_(0.0)
extractor._global_attention._module.bias.data.fill_(0.0)
indices = torch.LongTensor(
[[[1, 3], [2, 4]], [[0, 2], [3, 4]]]
) # smaller span tests masking.
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, input_dim]
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 3:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now test the case in which we have some masked spans in our indices.
indices_mask = torch.tensor([[True, True], [True, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span was masked, so should be completely zero.
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), numpy.zeros([input_dim]))
| allennlp-master | tests/modules/span_extractors/self_attentive_span_extractor_test.py |
allennlp-master | tests/modules/span_extractors/__init__.py |
|
import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, EndpointSpanExtractor
from allennlp.common.params import Params
from allennlp.nn.util import batched_index_select
class TestEndpointSpanExtractor:
def test_endpoint_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "endpoint",
"input_dim": 7,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, EndpointSpanExtractor)
assert extractor.get_output_dim() == 17 # 2 * input_dim + span_width_embedding_dim
def test_correct_sequence_elements_are_embedded(self):
sequence_tensor = torch.randn([2, 5, 7])
        # Concatenate start and end points together to form our representation.
extractor = EndpointSpanExtractor(7, "x,y")
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, 14]
assert extractor.get_output_dim() == 14
assert extractor.get_input_dim() == 7
start_indices, end_indices = indices.split(1, -1)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
start_embeddings, end_embeddings = span_representations.split(7, -1)
correct_start_embeddings = batched_index_select(sequence_tensor, start_indices.squeeze())
correct_end_embeddings = batched_index_select(sequence_tensor, end_indices.squeeze())
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.data.numpy()
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.data.numpy()
)
def test_masked_indices_are_handled_correctly(self):
sequence_tensor = torch.randn([2, 5, 7])
        # Concatenate start and end points together to form our representation.
extractor = EndpointSpanExtractor(7, "x,y")
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
# Make a mask with the second batch element completely masked.
indices_mask = torch.tensor([[True, True], [False, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
start_embeddings, end_embeddings = span_representations.split(7, -1)
start_indices, end_indices = indices.split(1, -1)
correct_start_embeddings = batched_index_select(
sequence_tensor, start_indices.squeeze()
).data
# Completely masked second batch element, so it should all be zero.
correct_start_embeddings[1, :, :].fill_(0)
correct_end_embeddings = batched_index_select(sequence_tensor, end_indices.squeeze()).data
correct_end_embeddings[1, :, :].fill_(0)
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.numpy()
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.numpy()
)
def test_masked_indices_are_handled_correctly_with_exclusive_indices(self):
sequence_tensor = torch.randn([2, 5, 8])
        # Concatenate start and end points together to form our representation
        # for both the forward and backward directions.
extractor = EndpointSpanExtractor(8, "x,y", use_exclusive_start_indices=True)
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [0, 1]]])
sequence_mask = torch.tensor(
[[True, True, True, True, True], [True, True, True, False, False]]
)
span_representations = extractor(sequence_tensor, indices, sequence_mask=sequence_mask)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
start_embeddings, end_embeddings = span_representations.split(8, -1)
correct_start_indices = torch.LongTensor([[0, 1], [-1, -1]])
# These indices should be -1, so they'll be replaced with a sentinel. Here,
# we'll set them to a value other than -1 so we can index select the indices and
# replace them later.
correct_start_indices[1, 0] = 1
correct_start_indices[1, 1] = 1
correct_end_indices = torch.LongTensor([[3, 4], [2, 1]])
correct_start_embeddings = batched_index_select(
sequence_tensor.contiguous(), correct_start_indices
)
        # These elements had a sequence_tensor index of 0, so their exclusive indices are the start sentinel.
correct_start_embeddings[1, 0] = extractor._start_sentinel.data
correct_start_embeddings[1, 1] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.data.numpy()
)
correct_end_embeddings = batched_index_select(
sequence_tensor.contiguous(), correct_end_indices
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.data.numpy()
)
| allennlp-master | tests/modules/span_extractors/endpoint_span_extractor_test.py |
import numpy
import pytest
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.modules.span_extractors import BidirectionalEndpointSpanExtractor, SpanExtractor
from allennlp.nn.util import batched_index_select
class TestBidirectonalEndpointSpanExtractor:
def test_bidirectional_endpoint_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "bidirectional_endpoint",
"input_dim": 4,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, BidirectionalEndpointSpanExtractor)
assert extractor.get_output_dim() == 2 + 2 + 3
def test_raises_on_odd_input_dimension(self):
with pytest.raises(ConfigurationError):
_ = BidirectionalEndpointSpanExtractor(7)
def test_correct_sequence_elements_are_embedded(self):
sequence_tensor = torch.randn([2, 5, 8])
        # Concatenate start and end points together to form our representation
        # for both the forward and backward directions.
extractor = BidirectionalEndpointSpanExtractor(
input_dim=8, forward_combination="x,y", backward_combination="x,y"
)
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, 16]
assert extractor.get_output_dim() == 16
assert extractor.get_input_dim() == 8
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
(
forward_start_embeddings,
forward_end_embeddings,
backward_start_embeddings,
backward_end_embeddings,
) = span_representations.split(4, -1)
forward_sequence_tensor, backward_sequence_tensor = sequence_tensor.split(4, -1)
        # Forward direction => subtract 1 from start indices to make them exclusive.
correct_forward_start_indices = torch.LongTensor([[0, 1], [-1, 2]])
# This index should be -1, so it will be replaced with a sentinel. Here,
# we'll set it to a value other than -1 so we can index select the indices and
# replace it later.
correct_forward_start_indices[1, 0] = 1
# Forward direction => end indices are the same.
correct_forward_end_indices = torch.LongTensor([[3, 4], [2, 4]])
# Backward direction => start indices are exclusive, so add 1 to the end indices.
correct_backward_start_indices = torch.LongTensor([[4, 5], [3, 5]])
        # These exclusive backward start indices are outside the tensor, so they will be replaced with the end sentinel.
# Here we replace them with ones so we can index select using these indices without torch
# complaining.
correct_backward_start_indices[0, 1] = 1
correct_backward_start_indices[1, 1] = 1
# Backward direction => end indices are inclusive and equal to the forward start indices.
correct_backward_end_indices = torch.LongTensor([[1, 2], [0, 3]])
correct_forward_start_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_start_indices
)
        # This element had a sequence_tensor index of 0, so its exclusive index is the start sentinel.
correct_forward_start_embeddings[1, 0] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
forward_start_embeddings.data.numpy(), correct_forward_start_embeddings.data.numpy()
)
correct_forward_end_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_end_indices
)
numpy.testing.assert_array_equal(
forward_end_embeddings.data.numpy(), correct_forward_end_embeddings.data.numpy()
)
correct_backward_end_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_end_indices
)
numpy.testing.assert_array_equal(
backward_end_embeddings.data.numpy(), correct_backward_end_embeddings.data.numpy()
)
correct_backward_start_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_start_indices
)
        # These elements had a sequence_tensor index == sequence_tensor.size(1),
        # so their exclusive indices are the end sentinel.
correct_backward_start_embeddings[0, 1] = extractor._end_sentinel.data
correct_backward_start_embeddings[1, 1] = extractor._end_sentinel.data
numpy.testing.assert_array_equal(
backward_start_embeddings.data.numpy(), correct_backward_start_embeddings.data.numpy()
)
def test_correct_sequence_elements_are_embedded_with_a_masked_sequence(self):
sequence_tensor = torch.randn([2, 5, 8])
        # Concatenate start and end points together to form our representation
        # for both the forward and backward directions.
extractor = BidirectionalEndpointSpanExtractor(
input_dim=8, forward_combination="x,y", backward_combination="x,y"
)
indices = torch.LongTensor(
[
[[1, 3], [2, 4]],
# This span has an end index at the
# end of the padded sequence.
[[0, 2], [0, 1]],
]
)
sequence_mask = torch.tensor(
[[True, True, True, True, True], [True, True, True, False, False]]
)
span_representations = extractor(sequence_tensor, indices, sequence_mask=sequence_mask)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
(
forward_start_embeddings,
forward_end_embeddings,
backward_start_embeddings,
backward_end_embeddings,
) = span_representations.split(4, -1)
forward_sequence_tensor, backward_sequence_tensor = sequence_tensor.split(4, -1)
        # Forward direction => subtract 1 from start indices to make them exclusive.
correct_forward_start_indices = torch.LongTensor([[0, 1], [-1, -1]])
# These indices should be -1, so they'll be replaced with a sentinel. Here,
# we'll set them to a value other than -1 so we can index select the indices and
# replace them later.
correct_forward_start_indices[1, 0] = 1
correct_forward_start_indices[1, 1] = 1
# Forward direction => end indices are the same.
correct_forward_end_indices = torch.LongTensor([[3, 4], [2, 1]])
# Backward direction => start indices are exclusive, so add 1 to the end indices.
correct_backward_start_indices = torch.LongTensor([[4, 5], [3, 2]])
# These exclusive backward start indices are outside the tensor, so will be replaced
# with the end sentinel. Here we replace them with ones so we can index select using
# these indices without torch complaining.
correct_backward_start_indices[0, 1] = 1
# Backward direction => end indices are inclusive and equal to the forward start indices.
correct_backward_end_indices = torch.LongTensor([[1, 2], [0, 0]])
correct_forward_start_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_start_indices
)
        # These elements had a sequence_tensor index of 0, so their exclusive indices are the start sentinel.
correct_forward_start_embeddings[1, 0] = extractor._start_sentinel.data
correct_forward_start_embeddings[1, 1] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
forward_start_embeddings.data.numpy(), correct_forward_start_embeddings.data.numpy()
)
correct_forward_end_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_end_indices
)
numpy.testing.assert_array_equal(
forward_end_embeddings.data.numpy(), correct_forward_end_embeddings.data.numpy()
)
correct_backward_end_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_end_indices
)
numpy.testing.assert_array_equal(
backward_end_embeddings.data.numpy(), correct_backward_end_embeddings.data.numpy()
)
correct_backward_start_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_start_indices
)
        # This element had a sequence_tensor index == sequence_tensor.size(1),
        # so its exclusive index is the end sentinel.
correct_backward_start_embeddings[0, 1] = extractor._end_sentinel.data
# This element has sequence_tensor index == the masked length of the batch element,
# so it should be the end_sentinel even though it isn't greater than sequence_tensor.size(1).
correct_backward_start_embeddings[1, 0] = extractor._end_sentinel.data
numpy.testing.assert_array_equal(
backward_start_embeddings.data.numpy(), correct_backward_start_embeddings.data.numpy()
)
def test_forward_doesnt_raise_with_empty_sequence(self):
# size: (batch_size=1, sequence_length=2, emb_dim=2)
sequence_tensor = torch.FloatTensor([[[0.0, 0.0], [0.0, 0.0]]])
# size: (batch_size=1, sequence_length=2)
sequence_mask = torch.tensor([[False, False]])
# size: (batch_size=1, spans_count=1, 2)
span_indices = torch.LongTensor([[[-1, -1]]])
# size: (batch_size=1, spans_count=1)
span_indices_mask = torch.tensor([[False]])
extractor = BidirectionalEndpointSpanExtractor(
input_dim=2, forward_combination="x,y", backward_combination="x,y"
)
span_representations = extractor(
sequence_tensor,
span_indices,
sequence_mask=sequence_mask,
span_indices_mask=span_indices_mask,
)
numpy.testing.assert_array_equal(
span_representations.detach(), torch.FloatTensor([[[0.0, 0.0, 0.0, 0.0]]])
)
def test_forward_raises_with_invalid_indices(self):
sequence_tensor = torch.randn([2, 5, 8])
extractor = BidirectionalEndpointSpanExtractor(input_dim=8)
indices = torch.LongTensor([[[-1, 3], [7, 4]], [[0, 12], [0, -1]]])
with pytest.raises(ValueError):
_ = extractor(sequence_tensor, indices)
| allennlp-master | tests/modules/span_extractors/bidirectional_endpoint_span_extractor_test.py |
import numpy
from numpy.testing import assert_almost_equal
import torch
from torch.nn import Parameter
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import LinearMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestLinearMatrixAttention(AllenNlpTestCase):
def test_can_init_dot(self):
legacy_attention = MatrixAttention.from_params(
Params({"type": "linear", "tensor_1_dim": 3, "tensor_2_dim": 3})
)
        assert isinstance(legacy_attention, LinearMatrixAttention)
def test_linear_similarity(self):
linear = LinearMatrixAttention(3, 3)
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.1]))
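        # With the default "x,y" combination, each score is the dot product of the weight vector with
        # [x; y] plus the bias, where the first three weights apply to the first matrix and the last
        # three to the second.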
output = linear(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
assert_almost_equal(
output.data.numpy(),
numpy.array(
[[[4.1000, 7.1000], [17.4000, 20.4000]], [[-9.8000, -6.8000], [36.6000, 39.6000]]]
),
decimal=2,
)
def test_bidaf_trilinear_similarity(self):
linear = LinearMatrixAttention(2, 2, combination="x,y,x*y")
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.0]))
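        # With the "x,y,x*y" combination the six weights apply to [x; y; x*y] in order, as spelled out
        # term by term in the expected values below.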
output = linear(
torch.FloatTensor([[[0, 0], [4, 5]], [[-7, -8], [10, 11]]]),
torch.FloatTensor([[[1, 2], [4, 5]], [[7, 8], [10, 11]]]),
)
assert_almost_equal(
output.data.numpy(),
numpy.array(
[
[
[0 + 0 + 2 + -2 + 0 + 0, 0 + 0 + 8 + -5 + 0 + 0],
[-1.2 + 2.5 + 2 + -2 + 4 + 10, -1.2 + 2.5 + 8 + -5 + 16 + 25],
],
[
[2.1 + -4 + 14 + -8 + -49 + -64, 2.1 + -4 + 20 + -11 + -70 + -88],
[-3 + 5.5 + 14 + -8 + 70 + 88, -3 + 5.5 + 20 + -11 + 100 + 121],
],
]
),
decimal=2,
)
| allennlp-master | tests/modules/matrix_attention/linear_matrix_attention_test.py |
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from allennlp.common.testing import AllenNlpTestCase
class TestBilinearMatrixAttention(AllenNlpTestCase):
def test_forward_does_a_bilinear_product(self):
params = Params({"matrix_1_dim": 2, "matrix_2_dim": 2})
bilinear = BilinearMatrixAttention.from_params(params)
bilinear._weight_matrix = Parameter(torch.FloatTensor([[-0.3, 0.5], [2.0, -1.0]]))
bilinear._bias = Parameter(torch.FloatTensor([0.1]))
a_vectors = torch.FloatTensor([[[1, 1], [2, 2]]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
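        # Each attention score should be a_i^T W b_j plus the scalar bias.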
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2, 2)
assert_almost_equal(result, [[[1.8, -0.4], [3.5, -0.9]]])
def test_forward_does_a_bilinear_product_when_using_biases(self):
params = Params({"matrix_1_dim": 2, "matrix_2_dim": 2, "use_input_biases": True})
bilinear = BilinearMatrixAttention.from_params(params)
bilinear._weight_matrix = Parameter(
torch.FloatTensor([[-0.3, 0.5, 1.0], [2.0, -1.0, -1.0], [1.0, 0.5, 1.0]])
)
bilinear._bias = Parameter(torch.FloatTensor([0.1]))
a_vectors = torch.FloatTensor([[[1, 1], [2, 2]]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2, 2)
assert_almost_equal(result, [[[3.8, 1.1], [5.5, 0.6]]])
| allennlp-master | tests/modules/matrix_attention/bilinear_matrix_attention_test.py |
allennlp-master | tests/modules/matrix_attention/__init__.py |
|
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import DotProductMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestDotProductMatrixAttention(AllenNlpTestCase):
def test_can_init_dot(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "dot_product"}))
        assert isinstance(legacy_attention, DotProductMatrixAttention)
def test_dot_product_similarity(self):
        # Example use case: a batch of size 2,
        # with a time component (e.g. sentences of length 2) where each word is a vector of length 3.
        # It is compared with another input of the same type.
output = DotProductMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
        # For the first batch element there is
        # no correlation between the first words of the input matrices
        # but perfect correlation for the second word.
        # For the second batch element there is
        # negative correlation for the first words
        # and positive correlation for the second word.
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [32, 77]], [[-194, -266], [266, 365]]]), decimal=2
)
| allennlp-master | tests/modules/matrix_attention/dot_product_matrix_attention_test.py |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
        assert isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
        # Example use case: a batch of size 2.
        # Each batch element has a time component (e.g. sentences of length 2) where each word is a vector of length 3.
        # It is compared with another input of the same type.
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
        # For the first batch element there is
        # no correlation between the first words of the input matrices
        # but perfect correlation for the second word.
        # For the second batch element there is
        # negative correlation for the first words
        # and perfect correlation for the second word.
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
| allennlp-master | tests/modules/matrix_attention/cosine_matrix_attention_test.py |
import numpy
import torch
from allennlp.modules.token_embedders import PassThroughTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def test_pass_through_embedder(self):
embedder = PassThroughTokenEmbedder(3)
tensor = torch.randn([4, 3])
numpy.testing.assert_equal(tensor.numpy(), embedder(tensor).numpy())
assert embedder.get_output_dim() == 3
| allennlp-master | tests/modules/token_embedders/pass_through_embedder_test.py |
import pytest
import torch
from allennlp.common import Params
from allennlp.data import Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerMismatchedIndexer
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerMismatchedEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestPretrainedTransformerMismatchedEmbedder(AllenNlpTestCase):
@pytest.mark.parametrize("train_parameters", [True, False])
def test_end_to_end(self, train_parameters: bool):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased")
sentence1 = ["A", ",", "AllenNLP", "sentence", "."]
sentence2 = ["AllenNLP", "is", "great"]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer_mismatched",
"model_name": "bert-base-uncased",
"train_parameters": train_parameters,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"]["offsets"].tolist() == [
[[1, 1], [2, 2], [3, 5], [6, 6], [7, 7]],
[[1, 3], [4, 4], [5, 5], [0, 0], [0, 0]],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, max(len(sentence1), len(sentence2)), 768)
assert not torch.isnan(bert_vectors).any()
assert bert_vectors.requires_grad == train_parameters
def test_long_sequence_splitting_end_to_end(self):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased", max_length=4)
sentence1 = ["A", ",", "AllenNLP", "sentence", "."]
sentence2 = ["AllenNLP", "is", "great"]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer_mismatched",
"model_name": "bert-base-uncased",
"max_length": 4,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True],
[True, True, True, False, False],
]
assert tokens["bert"]["offsets"].tolist() == [
[[1, 1], [2, 2], [3, 5], [6, 6], [7, 7]],
[[1, 3], [4, 4], [5, 5], [0, 0], [0, 0]],
]
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, max(len(sentence1), len(sentence2)), 768)
assert not torch.isnan(bert_vectors).any()
def test_token_without_wordpieces(self):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased")
sentence1 = ["A", "", "AllenNLP", "sentence", "."]
sentence2 = ["AllenNLP", "", "great"]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer_mismatched",
"model_name": "bert-base-uncased",
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"]["offsets"].tolist() == [
[[1, 1], [-1, -1], [2, 4], [5, 5], [6, 6]],
[[1, 3], [-1, -1], [4, 4], [0, 0], [0, 0]],
]
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, max(len(sentence1), len(sentence2)), 768)
assert not torch.isnan(bert_vectors).any()
assert all(bert_vectors[0, 1] == 0)
assert all(bert_vectors[1, 1] == 0)
def test_exotic_tokens_no_nan_grads(self):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased")
sentence1 = ["A", "", "AllenNLP", "sentence", "."]
sentence2 = ["A", "\uf732\uf730\uf730\uf733", "AllenNLP", "sentence", "."]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
token_embedder = BasicTextFieldEmbedder(
{"bert": PretrainedTransformerMismatchedEmbedder("bert-base-uncased")}
)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
bert_vectors = token_embedder(tokens)
test_loss = bert_vectors.mean()
test_loss.backward()
for name, param in token_embedder.named_parameters():
grad = param.grad
assert (grad is None) or (not torch.any(torch.isnan(grad)).item())
| allennlp-master | tests/modules/token_embedders/pretrained_transformer_mismatched_embedder_test.py |
allennlp-master | tests/modules/token_embedders/__init__.py |
|
import gzip
import warnings
import numpy
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.embedding import (
_read_pretrained_embeddings_file,
Embedding,
EmbeddingsTextFile,
format_embeddings_file_uri,
parse_embeddings_file_uri,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
class TestEmbedding(AllenNlpTestCase):
def test_get_embedding_layer_uses_correct_embedding_dim(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode("utf-8"))
embedding_weights = _read_pretrained_embeddings_file(embeddings_filename, 3, vocab)
assert tuple(embedding_weights.size()) == (4, 3) # 4 because of padding and OOV
with pytest.raises(ConfigurationError):
_read_pretrained_embeddings_file(embeddings_filename, 4, vocab)
def test_forward_works_with_projection_layer(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("the")
vocab.add_token_to_namespace("a")
params = Params(
{
"pretrained_file": str(
self.FIXTURES_ROOT / "embeddings/glove.6B.300d.sample.txt.gz"
),
"embedding_dim": 300,
"projection_dim": 20,
}
)
embedding_layer = Embedding.from_params(params, vocab=vocab)
input_tensor = torch.LongTensor([[3, 2, 1, 0]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 4, 20)
input_tensor = torch.LongTensor([[[3, 2, 1, 0]]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 1, 4, 20)
def test_embedding_layer_actually_initializes_word_vectors_correctly(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
vocab.add_token_to_namespace(unicode_space)
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write(f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 3})
embedding_layer = Embedding.from_params(params, vocab=vocab)
word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index(unicode_space)]
assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3, 5.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 3})
embedding_layer = Embedding.from_params(params, vocab=vocab)
word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
assert not numpy.allclose(word_vector.numpy(), numpy.array([0.0, 0.0, 0.0]))
def test_read_hdf5_format_file(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings.hdf5")
embeddings = numpy.random.rand(vocab.get_vocab_size(), 5)
with h5py.File(embeddings_filename, "w") as fout:
_ = fout.create_dataset("embedding", embeddings.shape, dtype="float32", data=embeddings)
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 5})
embedding_layer = Embedding.from_params(params, vocab=vocab)
assert numpy.allclose(embedding_layer.weight.data.numpy(), embeddings)
def test_read_hdf5_raises_on_invalid_shape(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
embeddings_filename = str(self.TEST_DIR / "embeddings.hdf5")
embeddings = numpy.random.rand(vocab.get_vocab_size(), 10)
with h5py.File(embeddings_filename, "w") as fout:
_ = fout.create_dataset("embedding", embeddings.shape, dtype="float32", data=embeddings)
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 5})
with pytest.raises(ConfigurationError):
_ = Embedding.from_params(params, vocab=vocab)
def test_read_embedding_file_inside_archive(self):
token2vec = {
"think": torch.Tensor([0.143, 0.189, 0.555, 0.361, 0.472]),
"make": torch.Tensor([0.878, 0.651, 0.044, 0.264, 0.872]),
"difference": torch.Tensor([0.053, 0.162, 0.671, 0.110, 0.259]),
"àèìòù": torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0]),
}
vocab = Vocabulary()
for token in token2vec:
vocab.add_token_to_namespace(token)
params = Params(
{
"pretrained_file": str(self.FIXTURES_ROOT / "embeddings/multi-file-archive.zip"),
"embedding_dim": 5,
}
)
with pytest.raises(
ValueError,
match="The archive .*/embeddings/multi-file-archive.zip contains multiple files, "
"so you must select one of the files inside "
"providing a uri of the type: "
"\\(path_or_url_to_archive\\)#path_inside_archive\\.",
):
Embedding.from_params(params, vocab=vocab)
for ext in [".zip", ".tar.gz"]:
archive_path = str(self.FIXTURES_ROOT / "embeddings/multi-file-archive") + ext
file_uri = format_embeddings_file_uri(archive_path, "folder/fake_embeddings.5d.txt")
params = Params({"pretrained_file": file_uri, "embedding_dim": 5})
embeddings = Embedding.from_params(params, vocab=vocab).weight.data
for tok, vec in token2vec.items():
i = vocab.get_token_index(tok)
assert torch.equal(embeddings[i], vec), "Problem with format " + archive_path
def test_embeddings_text_file(self):
txt_path = str(self.FIXTURES_ROOT / "utf-8_sample/utf-8_sample.txt")
        # This is definitely a correct way to read a utf-8 encoded text file
with open(txt_path, "rt", encoding="utf-8") as f:
correct_text = f.read()
# Check if we get the correct text on plain and compressed versions of the file
paths = [txt_path] + [txt_path + ext for ext in [".gz", ".zip"]]
for path in paths:
with EmbeddingsTextFile(path) as f:
text = f.read()
assert text == correct_text, "Test failed for file: " + path
# Check for a file contained inside an archive with multiple files
for ext in [".zip", ".tar.gz", ".tar.bz2", ".tar.lzma"]:
archive_path = str(self.FIXTURES_ROOT / "utf-8_sample/archives/utf-8") + ext
file_uri = format_embeddings_file_uri(archive_path, "folder/utf-8_sample.txt")
with EmbeddingsTextFile(file_uri) as f:
text = f.read()
assert text == correct_text, "Test failed for file: " + archive_path
# Passing a second level path when not reading an archive
with pytest.raises(ValueError):
with EmbeddingsTextFile(format_embeddings_file_uri(txt_path, "a/fake/path")):
pass
def test_embeddings_text_file_num_tokens(self):
test_filename = str(self.TEST_DIR / "temp_embeddings.vec")
def check_num_tokens(first_line, expected_num_tokens):
with open(test_filename, "w") as f:
f.write(first_line)
with EmbeddingsTextFile(test_filename) as f:
assert (
f.num_tokens == expected_num_tokens
), f"Wrong num tokens for line: {first_line}"
valid_header_lines = ["1000000 300", "300 1000000", "1000000"]
for line in valid_header_lines:
check_num_tokens(line, expected_num_tokens=1_000_000)
not_header_lines = ["hello 1", "hello 1 2", "111 222 333", "111 222 hello"]
for line in not_header_lines:
check_num_tokens(line, expected_num_tokens=None)
def test_decode_embeddings_file_uri(self):
first_level_paths = [
"path/to/embeddings.gz",
"unicode/path/òàè+ù.vec",
"http://www.embeddings.com/path/to/embeddings.gz",
"http://www.embeddings.com/àèìòù?query=blabla.zip",
]
second_level_paths = ["path/to/glove.27B.300d.vec", "òàè+ù.vec", "crawl-300d-2M.vec"]
for simple_path in first_level_paths:
assert parse_embeddings_file_uri(simple_path) == (simple_path, None)
for path1, path2 in zip(first_level_paths, second_level_paths):
uri = format_embeddings_file_uri(path1, path2)
decoded = parse_embeddings_file_uri(uri)
assert decoded == (path1, path2)
def test_embedding_vocab_extension_with_specified_namespace(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1", "tokens_a")
vocab.add_token_to_namespace("word2", "tokens_a")
embedding_params = Params({"vocab_namespace": "tokens_a", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
original_weight = embedder.weight
assert original_weight.shape[0] == 4
extension_counter = {"tokens_a": {"word3": 1}}
vocab._extend(extension_counter)
embedder.extend_vocab(vocab, "tokens_a") # specified namespace
extended_weight = embedder.weight
assert extended_weight.shape[0] == 5
assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
def test_embedding_vocab_extension_with_default_namespace(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1")
vocab.add_token_to_namespace("word2")
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
original_weight = embedder.weight
assert original_weight.shape[0] == 4
extension_counter = {"tokens": {"word3": 1}}
vocab._extend(extension_counter)
embedder.extend_vocab(vocab) # default namespace
extended_weight = embedder.weight
assert extended_weight.shape[0] == 5
assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
def test_embedding_vocab_extension_without_stored_namespace(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1", "tokens_a")
vocab.add_token_to_namespace("word2", "tokens_a")
embedding_params = Params({"vocab_namespace": "tokens_a", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
# Previous models won't have _vocab_namespace attribute. Force it to be None
embedder._vocab_namespace = None
original_weight = embedder.weight
assert original_weight.shape[0] == 4
extension_counter = {"tokens_a": {"word3": 1}}
vocab._extend(extension_counter)
embedder.extend_vocab(vocab, "tokens_a") # specified namespace
extended_weight = embedder.weight
assert extended_weight.shape[0] == 5
assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
def test_embedding_vocab_extension_works_with_pretrained_embedding_file(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings2.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word3 0.5 0.3 -6.0\n".encode("utf-8"))
embeddings_file.write("word4 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode("utf-8"))
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode("utf-8"))
embedding_params = Params(
{
"vocab_namespace": "tokens",
"embedding_dim": 3,
"pretrained_file": embeddings_filename,
}
)
embedder = Embedding.from_params(embedding_params, vocab=vocab)
# Change weight to simulate embedding training
embedder.weight.data += 1
assert torch.all(
embedder.weight[2:, :] == torch.Tensor([[2.0, 3.3, 0.0], [1.1, 1.4, -3.0]])
)
original_weight = embedder.weight
assert tuple(original_weight.size()) == (4, 3) # 4 because of padding and OOV
vocab.add_token_to_namespace("word3")
embedder.extend_vocab(
vocab, extension_pretrained_file=embeddings_filename
) # default namespace
extended_weight = embedder.weight
        # Make sure extension happened for the extra token in the extended vocab.
assert tuple(extended_weight.size()) == (5, 3)
# Make sure extension doesn't change original trained weights.
assert torch.all(original_weight[:4, :] == extended_weight[:4, :])
# Make sure extended weight is taken from the embedding file.
assert torch.all(extended_weight[4, :] == torch.Tensor([0.5, 0.3, -6.0]))
def test_embedding_vocab_extension_is_no_op_when_extension_should_not_happen(self):
# Case1: When vocab is already in sync with embeddings it should be a no-op.
vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
original_weight = embedder.weight
embedder.extend_vocab(vocab, "tokens")
assert torch.all(embedder.weight == original_weight)
        # Case2: Shouldn't wrongly assume the "tokens" namespace for extension if no
        # information on vocab_namespace is available. Rather, log a warning and be a no-op.
vocab = Vocabulary()
vocab.add_token_to_namespace("word1", "tokens")
vocab.add_token_to_namespace("word2", "tokens")
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
# Previous models won't have _vocab_namespace attribute. Force it to be None
embedder._vocab_namespace = None
embedder.weight = torch.nn.Parameter(embedder.weight[:1, :])
assert embedder.weight.shape[0] == 1
embedder.extend_vocab(vocab) # Don't specify namespace
assert embedder.weight.shape[0] == 1
def test_embedding_vocab_extension_raises_error_for_incorrect_vocab(self):
# When vocab namespace of extension vocab is smaller than embeddings
# it should raise configuration error.
vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
with pytest.raises(ConfigurationError):
embedder.extend_vocab(Vocabulary(), "tokens")
def test_embedding_constructed_directly_with_pretrained_file(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
vocab.add_token_to_namespace(unicode_space)
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write(f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))
num_embeddings = vocab.get_vocab_size()
embedding_layer = Embedding(
embedding_dim=3,
num_embeddings=num_embeddings,
pretrained_file=embeddings_filename,
vocab=vocab,
)
word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index(unicode_space)]
assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3, 5.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
| allennlp-master | tests/modules/token_embedders/embedding_test.py |
import numpy as np
import pytest
import torch
from numpy.testing import assert_almost_equal
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
self.non_padded_vocab = Vocabulary(non_padded_namespaces=["tokens"])
def test_forward_calculates_bow_properly(self):
embedder = BagOfWordCountsTokenEmbedder(self.vocab)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
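        # Each output row is a count over the 6-word vocabulary (4 tokens plus padding and OOV);
        # padding ids (0) are not counted.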
numpy_tensor = np.array([[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_zeros_out_unknown_tokens(self):
embedder = BagOfWordCountsTokenEmbedder(self.vocab, ignore_oov=True)
numpy_tensor = np.array([[1, 5], [2, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_ignore_oov_should_fail_on_non_padded_vocab(self):
with pytest.raises(ConfigurationError):
BagOfWordCountsTokenEmbedder(self.non_padded_vocab, ignore_oov=True)
def test_projects_properly(self):
embedder = BagOfWordCountsTokenEmbedder(vocab=self.vocab, projection_dim=50)
numpy_tensor = np.array([[1, 0], [1, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
| allennlp-master | tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py |
from copy import deepcopy
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules import Seq2VecEncoder
from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.common.testing import AllenNlpTestCase
class TestTokenCharactersEncoder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1", "token_characters")
self.vocab.add_token_to_namespace("2", "token_characters")
self.vocab.add_token_to_namespace("3", "token_characters")
self.vocab.add_token_to_namespace("4", "token_characters")
params = Params(
{
"embedding": {"embedding_dim": 2, "vocab_namespace": "token_characters"},
"encoder": {
"type": "cnn",
"embedding_dim": 2,
"num_filters": 4,
"ngram_filter_sizes": [1, 2],
"output_dim": 3,
},
}
)
self.encoder = TokenCharactersEncoder.from_params(vocab=self.vocab, params=deepcopy(params))
self.embedding = Embedding.from_params(vocab=self.vocab, params=params["embedding"])
self.inner_encoder = Seq2VecEncoder.from_params(params["encoder"])
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(self.encoder)
initializer(self.embedding)
initializer(self.inner_encoder)
def test_get_output_dim_uses_encoder_output_dim(self):
assert self.encoder.get_output_dim() == 3
def test_forward_applies_embedding_then_encoder(self):
numpy_tensor = numpy.random.randint(6, size=(3, 4, 7))
inputs = torch.from_numpy(numpy_tensor)
encoder_output = self.encoder(inputs)
reshaped_input = inputs.view(12, 7)
embedded = self.embedding(reshaped_input)
mask = (inputs != 0).long().view(12, 7)
reshaped_manual_output = self.inner_encoder(embedded, mask)
manual_output = reshaped_manual_output.view(3, 4, 3)
assert_almost_equal(encoder_output.data.numpy(), manual_output.data.numpy())
| allennlp-master | tests/modules/token_embedders/token_characters_encoder_test.py |
import math
import pytest
import torch
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
class TestPretrainedTransformerEmbedder(AllenNlpTestCase):
def test_forward_runs_when_initialized_from_params(self):
# This code just passes things off to `transformers`, so we only have a very simple
# test.
params = Params({"model_name": "bert-base-uncased"})
embedder = PretrainedTransformerEmbedder.from_params(params)
token_ids = torch.randint(0, 100, (1, 4))
mask = torch.randint(0, 2, (1, 4)).bool()
output = embedder(token_ids=token_ids, mask=mask)
assert tuple(output.size()) == (1, 4, 768)
@pytest.mark.parametrize(
"train_parameters, last_layer_only, gradient_checkpointing",
[
(train_parameters, last_layer_only, gradient_checkpointing)
for train_parameters in {True, False}
for last_layer_only in {True, False}
for gradient_checkpointing in {True, False}
if train_parameters
or not gradient_checkpointing # checkpointing only makes sense when we're actually training the layers
],
)
def test_end_to_end(
self,
train_parameters: bool,
last_layer_only: bool,
gradient_checkpointing: bool,
):
tokenizer = PretrainedTransformerTokenizer(model_name="bert-base-uncased")
token_indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased")
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
expected_tokens1 = ["[CLS]", "a", ",", "allen", "##nl", "##p", "sentence", ".", "[SEP]"]
assert [t.text for t in tokens1] == expected_tokens1
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
expected_tokens2 = ["[CLS]", "allen", "##nl", "##p", "is", "great", "[SEP]"]
assert [t.text for t in tokens2] == expected_tokens2
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer",
"model_name": "bert-base-uncased",
"train_parameters": train_parameters,
"last_layer_only": last_layer_only,
"gradient_checkpointing": gradient_checkpointing,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
assert tokens["bert"]["token_ids"].shape == (2, max_length)
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, False, False],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 9, 768)
assert bert_vectors.requires_grad == (train_parameters or not last_layer_only)
@pytest.mark.parametrize(
"train_parameters, last_layer_only, gradient_checkpointing",
[
(train_parameters, last_layer_only, gradient_checkpointing)
for train_parameters in {True, False}
for last_layer_only in {
True
} # Huggingface T5 is totally different in the way it returns the
# intermediate layers, and we don't support that.
for gradient_checkpointing in {True, False}
if train_parameters
or not gradient_checkpointing # checkpointing only makes sense when we're actually training the layers
],
)
def test_end_to_end_t5(
self,
train_parameters: bool,
last_layer_only: bool,
gradient_checkpointing: bool,
):
tokenizer = PretrainedTransformerTokenizer(model_name="patrickvonplaten/t5-tiny-random")
token_indexer = PretrainedTransformerIndexer(model_name="patrickvonplaten/t5-tiny-random")
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
expected_tokens1 = ["▁A", ",", "▁Allen", "N", "LP", "▁sentence", ".", "</s>"]
assert [t.text for t in tokens1] == expected_tokens1
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
expected_tokens2 = ["▁Allen", "N", "LP", "▁is", "▁great", "</s>"]
assert [t.text for t in tokens2] == expected_tokens2
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer",
"model_name": "patrickvonplaten/t5-tiny-random",
"train_parameters": train_parameters,
"last_layer_only": last_layer_only,
"gradient_checkpointing": gradient_checkpointing,
"sub_module": "encoder",
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
assert tokens["bert"]["token_ids"].shape == (2, max_length)
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, False, False],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 8, 64)
assert bert_vectors.requires_grad == (train_parameters or not last_layer_only)
def test_big_token_type_ids(self):
token_embedder = PretrainedTransformerEmbedder("roberta-base")
token_ids = torch.LongTensor([[1, 2, 3], [2, 3, 4]])
mask = torch.ones_like(token_ids).bool()
type_ids = torch.zeros_like(token_ids)
type_ids[1, 1] = 1
with pytest.raises(ValueError):
token_embedder(token_ids, mask, type_ids)
def test_xlnet_token_type_ids(self):
token_embedder = PretrainedTransformerEmbedder("xlnet-base-cased")
token_ids = torch.LongTensor([[1, 2, 3], [2, 3, 4]])
mask = torch.ones_like(token_ids).bool()
type_ids = torch.zeros_like(token_ids)
type_ids[1, 1] = 1
token_embedder(token_ids, mask, type_ids)
def test_long_sequence_splitting_end_to_end(self):
# Mostly the same as the end_to_end test (except for adding max_length=4),
# because we don't want this splitting behavior to change input/output format.
tokenizer = PretrainedTransformerTokenizer(model_name="bert-base-uncased")
token_indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased", max_length=4)
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer",
"model_name": "bert-base-uncased",
"max_length": 4,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
# Adds n_segments * 2 special tokens
segment_concat_length = int(math.ceil(max_length / 4)) * 2 + max_length
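        # Here max_length is 9 and the window size is 4, so ceil(9 / 4) = 3 segments add
        # 3 * 2 = 6 [CLS]/[SEP] tokens, for a concatenated length of 9 + 6 = 15.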
assert tokens["bert"]["token_ids"].shape == (2, segment_concat_length)
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, False, False],
]
assert tokens["bert"]["segment_concat_mask"].tolist() == [
[True] * segment_concat_length,
[True] * (segment_concat_length - 4) + [False] * 4, # 4 is hard-coded length difference
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 9, 768)
def test_fold_long_sequences(self):
# Let's just say [PAD] is 0, [CLS] is 1, and [SEP] is 2
token_ids = torch.LongTensor(
[
[1, 101, 102, 103, 104, 2, 1, 105, 106, 107, 108, 2, 1, 109, 2],
[1, 201, 202, 203, 204, 2, 1, 205, 206, 207, 208, 2, 0, 0, 0],
[1, 301, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
) # Shape: [3, 15]
segment_concat_mask = (token_ids > 0).long()
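        # With max_length=6, each length-15 sequence folds into ceil(15 / 6) = 3 windows,
        # so the batch of 3 becomes 3 * 3 = 9 rows; windows past a sequence's end are all padding.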
folded_token_ids = torch.LongTensor(
[
[1, 101, 102, 103, 104, 2],
[1, 105, 106, 107, 108, 2],
[1, 109, 2, 0, 0, 0],
[1, 201, 202, 203, 204, 2],
[1, 205, 206, 207, 208, 2],
[0, 0, 0, 0, 0, 0],
[1, 301, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
)
folded_segment_concat_mask = (folded_token_ids > 0).long()
token_embedder = PretrainedTransformerEmbedder("bert-base-uncased", max_length=6)
(
folded_token_ids_out,
folded_segment_concat_mask_out,
_,
) = token_embedder._fold_long_sequences(token_ids, segment_concat_mask)
assert (folded_token_ids_out == folded_token_ids).all()
assert (folded_segment_concat_mask_out == folded_segment_concat_mask).all()
def test_unfold_long_sequences(self):
# Let's just say [PAD] is 0, [CLS] is xxx1, and [SEP] is xxx2
# We assume embeddings are 1-dim and are the same as indices
embeddings = torch.LongTensor(
[
[1001, 101, 102, 103, 104, 1002],
[1011, 105, 106, 107, 108, 1012],
[1021, 109, 1022, 0, 0, 0],
[2001, 201, 202, 203, 204, 2002],
[2011, 205, 206, 207, 208, 2012],
[0, 0, 0, 0, 0, 0],
[3001, 301, 3002, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
).unsqueeze(-1)
mask = (embeddings > 0).long()
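        # Unfolding should stitch the 3 windows per instance back together, dropping the
        # per-window special-token embeddings except the first [CLS] and the final [SEP],
        # so the 15 concatenated positions collapse to 11 per instance.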
unfolded_embeddings = torch.LongTensor(
[
[1001, 101, 102, 103, 104, 105, 106, 107, 108, 109, 1022],
[2001, 201, 202, 203, 204, 205, 206, 207, 208, 2012, 0],
[3001, 301, 3002, 0, 0, 0, 0, 0, 0, 0, 0],
]
).unsqueeze(-1)
token_embedder = PretrainedTransformerEmbedder("bert-base-uncased", max_length=6)
unfolded_embeddings_out = token_embedder._unfold_long_sequences(
embeddings, mask, unfolded_embeddings.size(0), 15
)
assert (unfolded_embeddings_out == unfolded_embeddings).all()
def test_encoder_decoder_model(self):
token_embedder = PretrainedTransformerEmbedder("facebook/bart-large", sub_module="encoder")
token_ids = torch.LongTensor([[1, 2, 3], [2, 3, 4]])
mask = torch.ones_like(token_ids).bool()
token_embedder(token_ids, mask)
| allennlp-master | tests/modules/token_embedders/pretrained_transformer_embedder_test.py |
import torch
from allennlp.common import Params
from allennlp.common.testing import ModelTestCase
from allennlp.data.batch import Batch
from allennlp.modules.token_embedders import ElmoTokenEmbedder
class TestElmoTokenEmbedder(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "elmo" / "config" / "characters_token_embedder.json",
self.FIXTURES_ROOT / "data" / "conll2003.txt",
)
def test_tagger_with_elmo_token_embedder_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_tagger_with_elmo_token_embedder_forward_pass_runs_correctly(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
training_tensors = dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
probs = output_dict["class_probabilities"]
assert probs.size() == (2, 7, self.model.vocab.get_vocab_size("labels"))
def test_forward_works_with_projection_layer(self):
params = Params(
{
"options_file": self.FIXTURES_ROOT / "elmo" / "options.json",
"weight_file": self.FIXTURES_ROOT / "elmo" / "lm_weights.hdf5",
"projection_dim": 20,
}
)
word1 = [0] * 50
word2 = [0] * 50
word1[0] = 6
word1[1] = 5
word1[2] = 4
word1[3] = 3
word2[0] = 3
word2[1] = 2
word2[2] = 1
word2[3] = 0
embedding_layer = ElmoTokenEmbedder.from_params(vocab=None, params=params)
assert embedding_layer.get_output_dim() == 20
input_tensor = torch.LongTensor([[word1, word2]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 2, 20)
input_tensor = torch.LongTensor([[[word1]]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 1, 1, 20)
| allennlp-master | tests/modules/token_embedders/elmo_token_embedder_test.py |
import codecs
import gzip
import pickle
import shutil
import zipfile
from copy import deepcopy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.tokenizers import CharacterTokenizer
from allennlp.data.vocabulary import (
_NamespaceDependentDefaultDict,
_read_pretrained_tokens,
DEFAULT_OOV_TOKEN,
Vocabulary,
)
from allennlp.modules.token_embedders.embedding import format_embeddings_file_uri
class TestVocabulary(AllenNlpTestCase):
def setup_method(self):
token_indexer = SingleIdTokenIndexer("tokens")
text_field = TextField(
[Token(t) for t in ["a", "a", "a", "a", "b", "b", "c", "c", "c"]],
{"tokens": token_indexer},
)
self.instance = Instance({"text": text_field})
self.dataset = Batch([self.instance])
super().setup_method()
def test_pickling(self):
vocab = Vocabulary.from_instances(self.dataset)
pickled = pickle.dumps(vocab)
unpickled = pickle.loads(pickled)
assert dict(unpickled._index_to_token) == dict(vocab._index_to_token)
assert dict(unpickled._token_to_index) == dict(vocab._token_to_index)
assert unpickled._non_padded_namespaces == vocab._non_padded_namespaces
assert unpickled._oov_token == vocab._oov_token
assert unpickled._padding_token == vocab._padding_token
assert unpickled._retained_counter == vocab._retained_counter
def test_from_dataset_respects_max_vocab_size_single_int(self):
max_vocab_size = 1
vocab = Vocabulary.from_instances(self.dataset, max_vocab_size=max_vocab_size)
words = vocab.get_index_to_token_vocabulary().values()
# Additional 2 tokens are '@@PADDING@@' and '@@UNKNOWN@@' by default
assert len(words) == max_vocab_size + 2
vocab = Vocabulary.from_instances(self.dataset, min_count=None)
words = vocab.get_index_to_token_vocabulary().values()
assert len(words) == 5
def test_from_dataset_respects_min_count(self):
vocab = Vocabulary.from_instances(self.dataset, min_count={"tokens": 4})
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" not in words
assert "c" not in words
vocab = Vocabulary.from_instances(self.dataset, min_count=None)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" in words
def test_from_dataset_respects_exclusive_embedding_file(self):
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("a 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("b 0.1 0.4 -4.0\n".encode("utf-8"))
vocab = Vocabulary.from_instances(
self.dataset,
min_count={"tokens": 4},
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=True,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" not in words
assert "c" not in words
vocab = Vocabulary.from_instances(
self.dataset,
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=True,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" not in words
def test_from_dataset_respects_inclusive_embedding_file(self):
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("a 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("b 0.1 0.4 -4.0\n".encode("utf-8"))
vocab = Vocabulary.from_instances(
self.dataset,
min_count={"tokens": 4},
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=False,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" not in words
vocab = Vocabulary.from_instances(
self.dataset,
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=False,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" in words
def test_add_word_to_index_gives_consistent_results(self):
vocab = Vocabulary()
initial_vocab_size = vocab.get_vocab_size()
word_index = vocab.add_token_to_namespace("word")
assert "word" in vocab.get_index_to_token_vocabulary().values()
assert vocab.get_token_index("word") == word_index
assert vocab.get_token_from_index(word_index) == "word"
assert vocab.get_vocab_size() == initial_vocab_size + 1
# Now add it again, and make sure nothing changes.
vocab.add_token_to_namespace("word")
assert "word" in vocab.get_index_to_token_vocabulary().values()
assert vocab.get_token_index("word") == word_index
assert vocab.get_token_from_index(word_index) == "word"
assert vocab.get_vocab_size() == initial_vocab_size + 1
def test_namespaces(self):
vocab = Vocabulary()
initial_vocab_size = vocab.get_vocab_size()
word_index = vocab.add_token_to_namespace("word", namespace="1")
assert "word" in vocab.get_index_to_token_vocabulary(namespace="1").values()
assert vocab.get_token_index("word", namespace="1") == word_index
assert vocab.get_token_from_index(word_index, namespace="1") == "word"
assert vocab.get_vocab_size(namespace="1") == initial_vocab_size + 1
# Now add it again, in a different namespace and a different word, and make sure it's like
# new.
word2_index = vocab.add_token_to_namespace("word2", namespace="2")
word_index = vocab.add_token_to_namespace("word", namespace="2")
assert "word" in vocab.get_index_to_token_vocabulary(namespace="2").values()
assert "word2" in vocab.get_index_to_token_vocabulary(namespace="2").values()
assert vocab.get_token_index("word", namespace="2") == word_index
assert vocab.get_token_index("word2", namespace="2") == word2_index
assert vocab.get_token_from_index(word_index, namespace="2") == "word"
assert vocab.get_token_from_index(word2_index, namespace="2") == "word2"
assert vocab.get_vocab_size(namespace="2") == initial_vocab_size + 2
def test_namespace_dependent_default_dict(self):
default_dict = _NamespaceDependentDefaultDict(["bar", "*baz"], lambda: 7, lambda: 3)
# 'foo' is not a padded namespace
assert default_dict["foo"] == 7
# "baz" is a direct match with a padded namespace
assert default_dict["baz"] == 3
# the following match the wildcard "*baz"
assert default_dict["bar"] == 3
assert default_dict["foobaz"] == 3
def test_unknown_token(self):
# We're putting this behavior in a test so that the behavior is documented. There is
# solver code that depends in a small way on how we treat the unknown token, so any
# breaking change to this behavior should break a test, so you know you've done something
# that needs more consideration.
vocab = Vocabulary()
oov_token = vocab._oov_token
oov_index = vocab.get_token_index(oov_token)
assert oov_index == 1
assert vocab.get_token_index("unseen word") == oov_index
def test_get_token_index(self):
# The behavior of get_token_index depends on whether or not the namespace has an OOV token.
vocab = Vocabulary(
counter={"labels": {"foo": 3, "bar": 2}, "tokens": {"foo": 3, "bar": 2}},
non_padded_namespaces=["labels"],
)
# Quick sanity check, this is what the token to index mappings should look like.
expected_token_to_index_dicts = {
"tokens": {vocab._padding_token: 0, vocab._oov_token: 1, "foo": 2, "bar": 3},
"labels": {"foo": 0, "bar": 1},
}
assert vocab._token_to_index["tokens"] == expected_token_to_index_dicts["tokens"]
assert vocab._token_to_index["labels"] == expected_token_to_index_dicts["labels"]
# get_token_index should return the OOV token index for OOV tokens when it can.
assert vocab.get_token_index("baz", "tokens") == 1
# get_token_index should raise helpful error message when token is OOV and there
# is no default OOV token in the namespace.
with pytest.raises(
KeyError,
match=r"'baz' not found .* and namespace does not contain the default OOV token .*",
):
vocab.get_token_index("baz", "labels")
# same should happen for the default OOV token itself, if not in namespace.
with pytest.raises(KeyError, match=rf"'{vocab._oov_token}' not found .*"):
vocab.get_token_index(vocab._oov_token, "labels")
# Now just make sure the token_to_index mappings haven't been modified
# (since we're defaultdicts we need to be a little careful here).
assert vocab._token_to_index["tokens"] == expected_token_to_index_dicts["tokens"]
assert vocab._token_to_index["labels"] == expected_token_to_index_dicts["labels"]
def test_set_from_file_reads_padded_files(self):
vocab_filename = self.TEST_DIR / "vocab_file"
with codecs.open(vocab_filename, "w", "utf-8") as vocab_file:
vocab_file.write("<S>\n")
vocab_file.write("</S>\n")
vocab_file.write("<UNK>\n")
vocab_file.write("a\n")
vocab_file.write("tricky\x0bchar\n")
vocab_file.write("word\n")
vocab_file.write("another\n")
vocab = Vocabulary()
vocab.set_from_file(vocab_filename, is_padded=True, oov_token="<UNK>")
assert vocab._oov_token == DEFAULT_OOV_TOKEN
assert vocab.get_token_index("random string") == 3
assert vocab.get_token_index("<S>") == 1
assert vocab.get_token_index("</S>") == 2
assert vocab.get_token_index(DEFAULT_OOV_TOKEN) == 3
assert vocab.get_token_index("a") == 4
assert vocab.get_token_index("tricky\x0bchar") == 5
assert vocab.get_token_index("word") == 6
assert vocab.get_token_index("another") == 7
assert vocab.get_token_from_index(0) == vocab._padding_token
assert vocab.get_token_from_index(1) == "<S>"
assert vocab.get_token_from_index(2) == "</S>"
assert vocab.get_token_from_index(3) == DEFAULT_OOV_TOKEN
assert vocab.get_token_from_index(4) == "a"
assert vocab.get_token_from_index(5) == "tricky\x0bchar"
assert vocab.get_token_from_index(6) == "word"
assert vocab.get_token_from_index(7) == "another"
def test_set_from_file_reads_non_padded_files(self):
vocab_filename = self.TEST_DIR / "vocab_file"
with codecs.open(vocab_filename, "w", "utf-8") as vocab_file:
vocab_file.write("B-PERS\n")
vocab_file.write("I-PERS\n")
vocab_file.write("O\n")
vocab_file.write("B-ORG\n")
vocab_file.write("I-ORG\n")
vocab = Vocabulary()
vocab.set_from_file(vocab_filename, is_padded=False, namespace="tags")
assert vocab.get_token_index("B-PERS", namespace="tags") == 0
assert vocab.get_token_index("I-PERS", namespace="tags") == 1
assert vocab.get_token_index("O", namespace="tags") == 2
assert vocab.get_token_index("B-ORG", namespace="tags") == 3
assert vocab.get_token_index("I-ORG", namespace="tags") == 4
assert vocab.get_token_from_index(0, namespace="tags") == "B-PERS"
assert vocab.get_token_from_index(1, namespace="tags") == "I-PERS"
assert vocab.get_token_from_index(2, namespace="tags") == "O"
assert vocab.get_token_from_index(3, namespace="tags") == "B-ORG"
assert vocab.get_token_from_index(4, namespace="tags") == "I-ORG"
def test_saving_and_loading(self):
vocab_dir = self.TEST_DIR / "vocab_save"
vocab = Vocabulary(non_padded_namespaces=["a", "c"])
vocab.add_tokens_to_namespace(
["a0", "a1", "a2"], namespace="a"
) # non-padded, should start at 0
vocab.add_tokens_to_namespace(["b2", "b3"], namespace="b") # padded, should start at 2
vocab.save_to_files(vocab_dir)
vocab2 = Vocabulary.from_files(vocab_dir)
assert vocab2._non_padded_namespaces == {"a", "c"}
# Check namespace a.
assert vocab2.get_vocab_size(namespace="a") == 3
assert vocab2.get_token_from_index(0, namespace="a") == "a0"
assert vocab2.get_token_from_index(1, namespace="a") == "a1"
assert vocab2.get_token_from_index(2, namespace="a") == "a2"
assert vocab2.get_token_index("a0", namespace="a") == 0
assert vocab2.get_token_index("a1", namespace="a") == 1
assert vocab2.get_token_index("a2", namespace="a") == 2
# Check namespace b.
assert vocab2.get_vocab_size(namespace="b") == 4 # (unk + padding + two tokens)
assert vocab2.get_token_from_index(0, namespace="b") == vocab._padding_token
assert vocab2.get_token_from_index(1, namespace="b") == vocab._oov_token
assert vocab2.get_token_from_index(2, namespace="b") == "b2"
assert vocab2.get_token_from_index(3, namespace="b") == "b3"
assert vocab2.get_token_index(vocab._padding_token, namespace="b") == 0
assert vocab2.get_token_index(vocab._oov_token, namespace="b") == 1
assert vocab2.get_token_index("b2", namespace="b") == 2
assert vocab2.get_token_index("b3", namespace="b") == 3
# Check the dictionaries containing the reverse mapping are identical.
assert vocab.get_index_to_token_vocabulary("a") == vocab2.get_index_to_token_vocabulary("a")
assert vocab.get_index_to_token_vocabulary("b") == vocab2.get_index_to_token_vocabulary("b")
def test_saving_and_loading_works_with_byte_encoding(self):
# We're going to set a vocabulary from a TextField using byte encoding, index it, save the
# vocab, load the vocab, then index the text field again, and make sure we get the same
# result.
tokenizer = CharacterTokenizer(byte_encoding="utf-8")
token_indexer = TokenCharactersIndexer(character_tokenizer=tokenizer, min_padding_length=2)
tokens = [Token(t) for t in ["Øyvind", "für", "汉字"]]
text_field = TextField(tokens, {"characters": token_indexer})
dataset = Batch([Instance({"sentence": text_field})])
vocab = Vocabulary.from_instances(dataset)
text_field.index(vocab)
indexed_tokens = deepcopy(text_field._indexed_tokens)
vocab_dir = self.TEST_DIR / "vocab_save"
vocab.save_to_files(vocab_dir)
vocab2 = Vocabulary.from_files(vocab_dir)
text_field2 = TextField(tokens, {"characters": token_indexer})
text_field2.index(vocab2)
indexed_tokens2 = deepcopy(text_field2._indexed_tokens)
assert indexed_tokens == indexed_tokens2
def test_from_params(self):
# Save a vocab to check we can load it from_params.
vocab_dir = self.TEST_DIR / "vocab_save"
vocab = Vocabulary(non_padded_namespaces=["a", "c"])
vocab.add_tokens_to_namespace(
["a0", "a1", "a2"], namespace="a"
) # non-padded, should start at 0
vocab.add_tokens_to_namespace(["b2", "b3"], namespace="b") # padded, should start at 2
vocab.save_to_files(vocab_dir)
params = Params({"type": "from_files", "directory": vocab_dir})
vocab2 = Vocabulary.from_params(params)
assert vocab.get_index_to_token_vocabulary("a") == vocab2.get_index_to_token_vocabulary("a")
assert vocab.get_index_to_token_vocabulary("b") == vocab2.get_index_to_token_vocabulary("b")
# Test case where we build a vocab from a dataset.
vocab2 = Vocabulary.from_params(Params({}), instances=self.dataset)
assert vocab2.get_index_to_token_vocabulary("tokens") == {
0: "@@PADDING@@",
1: "@@UNKNOWN@@",
2: "a",
3: "c",
4: "b",
}
        # Test from_params raises when we have neither a dataset nor a vocab_directory.
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(Params({}))
# Test from_params raises when there are any other dict keys
# present apart from 'directory' and we aren't calling from_dataset.
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(
Params({"type": "from_files", "directory": vocab_dir, "min_count": {"tokens": 2}})
)
def test_from_params_adds_tokens_to_vocab(self):
vocab = Vocabulary.from_params(
Params({"tokens_to_add": {"tokens": ["q", "x", "z"]}}), instances=self.dataset
)
assert vocab.get_index_to_token_vocabulary("tokens") == {
0: "@@PADDING@@",
1: "@@UNKNOWN@@",
2: "a",
3: "c",
4: "b",
5: "q",
6: "x",
7: "z",
}
def test_valid_vocab_extension(self):
vocab_dir = self.TEST_DIR / "vocab_save"
# Test: padded/non-padded common namespaces are extending appropriately
non_padded_namespaces_list = [[], ["tokens"]]
for non_padded_namespaces in non_padded_namespaces_list:
original_vocab = Vocabulary(non_padded_namespaces=non_padded_namespaces)
original_vocab.add_tokens_to_namespace(["d", "a", "b"], namespace="tokens")
text_field = TextField(
[Token(t) for t in ["a", "d", "c", "e"]], {"tokens": SingleIdTokenIndexer("tokens")}
)
vocab_dir = self.TEST_DIR / "vocab_save"
shutil.rmtree(vocab_dir, ignore_errors=True)
original_vocab.save_to_files(vocab_dir)
instances = Batch([Instance({"text": text_field})])
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": non_padded_namespaces,
}
)
extended_vocab = Vocabulary.from_params(params, instances=instances)
extra_count = 2 if extended_vocab.is_padded("tokens") else 0
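            # Padded namespaces reserve indices 0 and 1 for @@PADDING@@ and @@UNKNOWN@@,
            # which shifts every real token's index by 2.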
assert extended_vocab.get_token_index("d", "tokens") == 0 + extra_count
assert extended_vocab.get_token_index("a", "tokens") == 1 + extra_count
assert extended_vocab.get_token_index("b", "tokens") == 2 + extra_count
assert extended_vocab.get_token_index("c", "tokens") # should be present
assert extended_vocab.get_token_index("e", "tokens") # should be present
assert extended_vocab.get_vocab_size("tokens") == 5 + extra_count
# Test: padded/non-padded non-common namespaces are extending appropriately
non_padded_namespaces_list = [[], ["tokens1"], ["tokens1", "tokens2"]]
for non_padded_namespaces in non_padded_namespaces_list:
original_vocab = Vocabulary(non_padded_namespaces=non_padded_namespaces)
original_vocab.add_token_to_namespace("a", namespace="tokens1") # index2
text_field = TextField(
[Token(t) for t in ["b"]], {"tokens2": SingleIdTokenIndexer("tokens2")}
)
instances = Batch([Instance({"text": text_field})])
vocab_dir = self.TEST_DIR / "vocab_save"
shutil.rmtree(vocab_dir, ignore_errors=True)
original_vocab.save_to_files(vocab_dir)
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": non_padded_namespaces,
}
)
extended_vocab = Vocabulary.from_params(params, instances=instances)
# Should have two namespaces
assert len(extended_vocab._token_to_index) == 2
extra_count = 2 if extended_vocab.is_padded("tokens1") else 0
assert extended_vocab.get_vocab_size("tokens1") == 1 + extra_count
extra_count = 2 if extended_vocab.is_padded("tokens2") else 0
assert extended_vocab.get_vocab_size("tokens2") == 1 + extra_count
def test_invalid_vocab_extension(self):
vocab_dir = self.TEST_DIR / "vocab_save"
original_vocab = Vocabulary(non_padded_namespaces=["tokens1"])
original_vocab.add_tokens_to_namespace(["a", "b"], namespace="tokens1")
original_vocab.add_token_to_namespace("p", namespace="tokens2")
original_vocab.save_to_files(vocab_dir)
text_field1 = TextField(
[Token(t) for t in ["a", "c"]], {"tokens1": SingleIdTokenIndexer("tokens1")}
)
text_field2 = TextField(
[Token(t) for t in ["p", "q", "r"]], {"tokens2": SingleIdTokenIndexer("tokens2")}
)
instances = Batch([Instance({"text1": text_field1, "text2": text_field2})])
# Following 2 should give error: tokens1 is non-padded in original_vocab but not in instances
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": [],
"tokens_to_add": {"tokens1": ["a"], "tokens2": ["p"]},
}
)
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params, instances=instances)
# Following 2 should not give error: overlapping namespaces have same padding setting
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": ["tokens1"],
"tokens_to_add": {"tokens1": ["a"], "tokens2": ["p"]},
}
)
Vocabulary.from_params(params, instances=instances)
# Following 2 should give error: tokens2 is padded in instances but not in original_vocab
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": ["tokens1", "tokens2"],
"tokens_to_add": {"tokens1": ["a"], "tokens2": ["p"]},
}
)
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params, instances=instances)
def test_from_params_extend_config(self):
vocab_dir = self.TEST_DIR / "vocab_save"
original_vocab = Vocabulary(non_padded_namespaces=["tokens"])
original_vocab.add_token_to_namespace("a", namespace="tokens")
original_vocab.save_to_files(vocab_dir)
text_field = TextField(
[Token(t) for t in ["a", "b"]], {"tokens": SingleIdTokenIndexer("tokens")}
)
instances = Batch([Instance({"text": text_field})])
# If you ask to extend vocab from `directory`, instances must be passed
# in Vocabulary constructor, or else there is nothing to extend to.
params = Params({"type": "extend", "directory": vocab_dir})
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params)
# If you ask to extend vocab, `directory` key must be present in params,
# or else there is nothing to extend from.
params = Params({"type": "extend"})
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params, instances=instances)
def test_from_params_valid_vocab_extension_thoroughly(self):
"""
Tests for Valid Vocab Extension thoroughly: Vocab extension is valid
when overlapping namespaces have same padding behaviour (padded/non-padded)
Summary of namespace paddings in this test:
original_vocab namespaces
tokens0 padded
tokens1 non-padded
tokens2 padded
tokens3 non-padded
instances namespaces
tokens0 padded
tokens1 non-padded
tokens4 padded
tokens5 non-padded
        Typical extension example (of the tokens1 namespace):
-> original_vocab index2token
apple #0->apple
bat #1->bat
cat #2->cat
        -> Tokens to be extended with: cat, an, apple, banana, atom, bat
-> extended_vocab: index2token
apple #0->apple
bat #1->bat
cat #2->cat
an #3->an
atom #4->atom
banana #5->banana
"""
vocab_dir = self.TEST_DIR / "vocab_save"
original_vocab = Vocabulary(non_padded_namespaces=["tokens1", "tokens3"])
original_vocab.add_token_to_namespace("apple", namespace="tokens0") # index:2
original_vocab.add_token_to_namespace("bat", namespace="tokens0") # index:3
original_vocab.add_token_to_namespace("cat", namespace="tokens0") # index:4
original_vocab.add_token_to_namespace("apple", namespace="tokens1") # index:0
original_vocab.add_token_to_namespace("bat", namespace="tokens1") # index:1
original_vocab.add_token_to_namespace("cat", namespace="tokens1") # index:2
original_vocab.add_token_to_namespace("a", namespace="tokens2") # index:0
original_vocab.add_token_to_namespace("b", namespace="tokens2") # index:1
original_vocab.add_token_to_namespace("c", namespace="tokens2") # index:2
original_vocab.add_token_to_namespace("p", namespace="tokens3") # index:0
original_vocab.add_token_to_namespace("q", namespace="tokens3") # index:1
original_vocab.save_to_files(vocab_dir)
text_field0 = TextField(
[Token(t) for t in ["cat", "an", "apple", "banana", "atom", "bat"]],
{"tokens0": SingleIdTokenIndexer("tokens0")},
)
text_field1 = TextField(
[Token(t) for t in ["cat", "an", "apple", "banana", "atom", "bat"]],
{"tokens1": SingleIdTokenIndexer("tokens1")},
)
text_field4 = TextField(
[Token(t) for t in ["l", "m", "n", "o"]], {"tokens4": SingleIdTokenIndexer("tokens4")}
)
text_field5 = TextField(
[Token(t) for t in ["x", "y", "z"]], {"tokens5": SingleIdTokenIndexer("tokens5")}
)
instances = Batch(
[
Instance(
{
"text0": text_field0,
"text1": text_field1,
"text4": text_field4,
"text5": text_field5,
}
)
]
)
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": ["tokens1", "tokens5"],
}
)
extended_vocab = Vocabulary.from_params(params, instances=instances)
# namespaces: tokens0, tokens1 is common.
# tokens2, tokens3 only vocab has. tokens4, tokens5 only instances
extended_namespaces = {*extended_vocab._token_to_index}
assert extended_namespaces == {"tokens{}".format(i) for i in range(6)}
        # Check that _non_padded_namespaces list is consistent after extension
        assert extended_vocab._non_padded_namespaces == {"tokens1", "tokens3", "tokens5"}
        # original_vocab["tokens1"] has 3 tokens; the instances contribute 6 distinct tokens,
        # 3 of which overlap, so the extended (non-padded) namespace ends up with 6 tokens
        assert extended_vocab.get_vocab_size("tokens1") == 6
        # tokens0 is padded, so it also keeps @@PADDING@@ and @@UNKNOWN@@: 5 + 3 new tokens = 8
        assert extended_vocab.get_vocab_size("tokens0") == 8
        # namespaces tokens2 and tokens3 were only in original_vocab,
        # so their token counts should be unchanged in extended_vocab
assert extended_vocab.get_vocab_size("tokens2") == original_vocab.get_vocab_size("tokens2")
assert extended_vocab.get_vocab_size("tokens3") == original_vocab.get_vocab_size("tokens3")
        # namespaces tokens4 and tokens5 were only in the instances,
        # so their counts come from the instances alone
assert extended_vocab.get_vocab_size("tokens4") == 6 # l,m,n,o + oov + padding
assert extended_vocab.get_vocab_size("tokens5") == 3 # x,y,z
# Word2index mapping of all words in all namespaces of original_vocab
# should be maintained in extended_vocab
for namespace, token2index in original_vocab._token_to_index.items():
for token, _ in token2index.items():
vocab_index = original_vocab.get_token_index(token, namespace)
extended_vocab_index = extended_vocab.get_token_index(token, namespace)
assert vocab_index == extended_vocab_index
# And same for Index2Word mapping
for namespace, index2token in original_vocab._index_to_token.items():
for index, _ in index2token.items():
vocab_token = original_vocab.get_token_from_index(index, namespace)
extended_vocab_token = extended_vocab.get_token_from_index(index, namespace)
assert vocab_token == extended_vocab_token
# Manual Print Check
# original_vocab._token_to_index :>
# {
# "tokens0": {"@@PADDING@@":0,"@@UNKNOWN@@":1,"apple":2,"bat":3,"cat":4},
# "tokens1": {"apple": 0,"bat":1,"cat":2},
# "tokens2": {"@@PADDING@@":0,"@@UNKNOWN@@":1,"a":2,"b":3,"c": 4},
# "tokens3": {"p":0,"q":1}
# }
# extended_vocab._token_to_index :>
# {
# "tokens0": {"@@PADDING@@": 0,"@@UNKNOWN@@": 1,
# "apple": 2,"bat": 3,"cat": 4,"an": 5,"banana": 6,"atom": 7},
# "tokens1": {"apple": 0,"bat": 1,"cat": 2,"an": 3,"banana": 4,"atom": 5},
# "tokens2": {"@@PADDING@@": 0,"@@UNKNOWN@@": 1,"a": 2,"b": 3,"c": 4},
# "tokens3": {"p": 0,"q": 1},
# "tokens4": {"@@PADDING@@": 0,"@@UNKNOWN@@": 1,"l": 2,"m": 3,"n": 4,"o": 5},
# "tokens5": {"x": 0,"y": 1,"z": 2}
# }
def test_vocab_can_print(self):
vocab = Vocabulary(non_padded_namespaces=["a", "c"])
vocab.add_tokens_to_namespace(["a0", "a1", "a2"], namespace="a")
vocab.add_tokens_to_namespace(["b2", "b3"], namespace="b")
print(vocab)
def test_read_pretrained_words(self):
# The fixture "fake_embeddings.5d.txt" was generated using the words in this random quote
words = set(
"If you think you are too small to make a difference "
"try to sleeping with a mosquito àèìòù".split(" ")
)
# Reading from a single (compressed) file or a single-file archive
base_path = str(self.FIXTURES_ROOT / "embeddings/fake_embeddings.5d.txt")
for ext in ["", ".gz", ".lzma", ".bz2", ".zip", ".tar.gz"]:
file_path = base_path + ext
words_read = set(_read_pretrained_tokens(file_path))
assert words_read == words, (
f"Wrong words for file {file_path}\n"
f" Read: {sorted(words_read)}\n"
f"Correct: {sorted(words)}"
)
# Reading from a multi-file archive
base_path = str(self.FIXTURES_ROOT / "embeddings/multi-file-archive")
file_path = "folder/fake_embeddings.5d.txt"
for ext in [".zip", ".tar.gz"]:
archive_path = base_path + ext
embeddings_file_uri = format_embeddings_file_uri(archive_path, file_path)
words_read = set(_read_pretrained_tokens(embeddings_file_uri))
assert words_read == words, (
f"Wrong words for file {archive_path}\n"
f" Read: {sorted(words_read)}\n"
f"Correct: {sorted(words)}"
)
def test_from_instances_exclusive_embeddings_file_inside_archive(self):
""" Just for ensuring there are no problems when reading pretrained tokens from an archive """
# Read embeddings file from archive
archive_path = str(self.TEST_DIR / "embeddings-archive.zip")
with zipfile.ZipFile(archive_path, "w") as archive:
file_path = "embedding.3d.vec"
with archive.open(file_path, "w") as embeddings_file:
embeddings_file.write("a 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("b 0.1 0.4 -4.0\n".encode("utf-8"))
with archive.open("dummy.vec", "w") as dummy_file:
dummy_file.write("c 1.0 2.3 -1.0 3.0\n".encode("utf-8"))
embeddings_file_uri = format_embeddings_file_uri(archive_path, file_path)
vocab = Vocabulary.from_instances(
self.dataset,
min_count={"tokens": 4},
pretrained_files={"tokens": embeddings_file_uri},
only_include_pretrained_words=True,
)
words = set(vocab.get_index_to_token_vocabulary().values())
assert "a" in words
assert "b" not in words
assert "c" not in words
vocab = Vocabulary.from_instances(
self.dataset,
pretrained_files={"tokens": embeddings_file_uri},
only_include_pretrained_words=True,
)
words = set(vocab.get_index_to_token_vocabulary().values())
assert "a" in words
assert "b" in words
assert "c" not in words
def test_registrability(self):
@Vocabulary.register("my-vocabulary", constructor="constructor")
class MyVocabulary(Vocabulary):
@classmethod
def constructor(cls):
return MyVocabulary()
params = Params({"type": "my-vocabulary"})
instance = Instance(fields={})
vocab = Vocabulary.from_params(params=params, instances=[instance])
assert isinstance(vocab, MyVocabulary)
def test_max_vocab_size_dict(self):
params = Params({"max_vocab_size": {"tokens": 1, "characters": 20}})
vocab = Vocabulary.from_params(params=params, instances=self.dataset)
words = vocab.get_index_to_token_vocabulary().values()
# Additional 2 tokens are '@@PADDING@@' and '@@UNKNOWN@@' by default
assert len(words) == 3
def test_max_vocab_size_partial_dict(self):
indexers = {
"tokens": SingleIdTokenIndexer(),
"token_characters": TokenCharactersIndexer(min_padding_length=3),
}
instance = Instance(
{
"text": TextField(
[Token(w) for w in "Abc def ghi jkl mno pqr stu vwx yz".split(" ")], indexers
)
}
)
dataset = Batch([instance])
params = Params({"max_vocab_size": {"tokens": 1}})
vocab = Vocabulary.from_params(params=params, instances=dataset)
assert len(vocab.get_index_to_token_vocabulary("tokens").values()) == 3 # 1 + 2
assert len(vocab.get_index_to_token_vocabulary("token_characters").values()) == 28 # 26 + 2
def test_min_pretrained_embeddings(self):
params = Params(
{
"pretrained_files": {
"tokens": str(self.FIXTURES_ROOT / "embeddings/glove.6B.100d.sample.txt.gz")
},
"min_pretrained_embeddings": {"tokens": 50},
}
)
vocab = Vocabulary.from_params(params=params, instances=self.dataset)
assert vocab.get_vocab_size() >= 50
assert vocab.get_token_index("his") > 1 # not @@UNKNOWN@@
def test_custom_padding_oov_tokens(self):
vocab = Vocabulary(oov_token="[UNK]")
assert vocab._oov_token == "[UNK]"
assert vocab._padding_token == "@@PADDING@@"
vocab = Vocabulary(padding_token="[PAD]")
assert vocab._oov_token == "@@UNKNOWN@@"
assert vocab._padding_token == "[PAD]"
vocab_dir = self.TEST_DIR / "vocab_save"
vocab = Vocabulary(oov_token="<UNK>")
vocab.add_tokens_to_namespace(["a0", "a1", "a2"], namespace="a")
vocab.save_to_files(vocab_dir)
params = Params({"type": "from_files", "directory": vocab_dir, "oov_token": "<UNK>"})
vocab = Vocabulary.from_params(params)
with pytest.raises(AssertionError) as excinfo:
vocab = Vocabulary.from_params(Params({"type": "from_files", "directory": vocab_dir}))
assert "OOV token not found!" in str(excinfo.value)
def test_extend_from_vocab(self):
vocab1 = Vocabulary(non_padded_namespaces={"1", "2"})
vocab2 = Vocabulary(non_padded_namespaces={"3"})
vocab1.add_tokens_to_namespace(["a", "b", "c"], namespace="1")
vocab1.add_tokens_to_namespace(["d", "e", "f"], namespace="2")
vocab2.add_tokens_to_namespace(["c", "d", "e"], namespace="1")
vocab2.add_tokens_to_namespace(["g", "h", "i"], namespace="3")
vocab1.extend_from_vocab(vocab2)
assert vocab1.get_namespaces() == {"1", "2", "3"}
assert vocab1._non_padded_namespaces == {"1", "2", "3"}
assert vocab1.get_token_to_index_vocabulary("1") == {
"a": 0,
"b": 1,
"c": 2,
"@@PADDING@@": 3,
"@@UNKNOWN@@": 4,
"d": 5,
"e": 6,
}
assert vocab1.get_token_to_index_vocabulary("2") == {"d": 0, "e": 1, "f": 2}
assert vocab1.get_token_to_index_vocabulary("3") == {"g": 0, "h": 1, "i": 2}
class TestVocabularyFromFilesWithArchive(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.tar_archive = self.TEST_DIR / "vocab.tar.gz"
self.zip_archive = self.TEST_DIR / "vocab.zip"
self.model_archive = self.TEST_DIR / "model.tar.gz"
shutil.copyfile(
self.FIXTURES_ROOT / "data" / "vocab.tar.gz",
self.tar_archive,
)
shutil.copyfile(
self.FIXTURES_ROOT / "data" / "vocab.zip",
self.zip_archive,
)
shutil.copyfile(
self.FIXTURES_ROOT / "simple_tagger" / "serialization" / "model.tar.gz",
self.model_archive,
)
def test_from_files_with_zip_archive(self):
vocab = Vocabulary.from_files(str(self.zip_archive))
        assert vocab.get_namespaces() == {"tokens"}
assert vocab.get_token_from_index(3, namespace="tokens") == ","
def test_from_files_with_tar_archive(self):
vocab = Vocabulary.from_files(str(self.tar_archive))
        assert vocab.get_namespaces() == {"tokens"}
assert vocab.get_token_from_index(3, namespace="tokens") == ","
def test_from_files_with_model_archive(self):
vocab = Vocabulary.from_files(str(self.model_archive))
        assert vocab.get_namespaces() == {"tokens", "labels"}
assert vocab.get_token_from_index(3, namespace="tokens") == "u.n."
| allennlp-master | tests/data/vocabulary_test.py |
| allennlp-master | tests/data/__init__.py
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestDataset(AllenNlpTestCase):
def setup_method(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("this")
self.vocab.add_token_to_namespace("is")
self.vocab.add_token_to_namespace("a")
self.vocab.add_token_to_namespace("sentence")
self.vocab.add_token_to_namespace(".")
self.token_indexer = {"tokens": SingleIdTokenIndexer()}
self.instances = self.get_instances()
super().setup_method()
def test_instances_must_have_homogeneous_fields(self):
instance1 = Instance({"tag": (LabelField(1, skip_indexing=True))})
instance2 = Instance({"words": TextField([Token("hello")], {})})
with pytest.raises(ConfigurationError):
_ = Batch([instance1, instance2])
def test_padding_lengths_uses_max_instance_lengths(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
padding_lengths = dataset.get_padding_lengths()
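        # Keys take the form "<indexer name>___<padding key>", and each length is the max
        # over instances (text1 fields have 5 and 5 tokens, text2 fields have 6 and 3).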
assert padding_lengths == {"text1": {"tokens___tokens": 5}, "text2": {"tokens___tokens": 6}}
def test_as_tensor_dict(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
padding_lengths = dataset.get_padding_lengths()
tensors = dataset.as_tensor_dict(padding_lengths)
text1 = tensors["text1"]["tokens"]["tokens"].detach().cpu().numpy()
text2 = tensors["text2"]["tokens"]["tokens"].detach().cpu().numpy()
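        # With this vocab, "this"=2, "is"=3, "a"=4, "sentence"=5, "."=6; unseen words
        # ("here", "different", "short") map to the OOV index 1, and 0 is padding.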
numpy.testing.assert_array_almost_equal(
text1, numpy.array([[2, 3, 4, 5, 6], [1, 3, 4, 5, 6]])
)
numpy.testing.assert_array_almost_equal(
text2, numpy.array([[2, 3, 4, 1, 5, 6], [2, 3, 1, 0, 0, 0]])
)
def get_instances(self):
field1 = TextField(
[Token(t) for t in ["this", "is", "a", "sentence", "."]], self.token_indexer
)
field2 = TextField(
[Token(t) for t in ["this", "is", "a", "different", "sentence", "."]],
self.token_indexer,
)
field3 = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "."]], self.token_indexer
)
field4 = TextField([Token(t) for t in ["this", "is", "short"]], self.token_indexer)
instances = [
Instance({"text1": field1, "text2": field2}),
Instance({"text1": field3, "text2": field4}),
]
return instances
| allennlp-master | tests/data/dataset_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token
class TestInstance(AllenNlpTestCase):
def test_instance_implements_mutable_mapping(self):
words_field = TextField([Token("hello")], {})
label_field = LabelField(1, skip_indexing=True)
instance = Instance({"words": words_field, "labels": label_field})
assert instance["words"] == words_field
assert instance["labels"] == label_field
assert len(instance) == 2
keys = {k for k, v in instance.items()}
assert keys == {"words", "labels"}
values = [v for k, v in instance.items()]
assert words_field in values
assert label_field in values
def test_duplicate(self):
# Verify the `duplicate()` method works with a `PretrainedTransformerIndexer` in
# a `TextField`. See https://github.com/allenai/allennlp/issues/4270.
instance = Instance(
{
"words": TextField(
[Token("hello")], {"tokens": PretrainedTransformerIndexer("bert-base-uncased")}
)
}
)
other = instance.duplicate()
assert other == instance
        # Adding new fields to the original instance should not affect the duplicate.
instance.add_field("labels", LabelField("some_label"))
assert "labels" not in other.fields
assert other != instance # sanity check on the '__eq__' method.
| allennlp-master | tests/data/instance_test.py |
from typing import Iterable
import pytest
from allennlp.data.fields import LabelField
from allennlp.data.instance import Instance
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
@pytest.mark.parametrize("lazy", (True, False))
def test_loader_uses_all_instances_when_batches_per_epochs_set(lazy):
NUM_INSTANCES = 20
BATCH_SIZE = 2
BATCHES_PER_EPOCH = 3
EPOCHS = 4
class FakeDatasetReader(DatasetReader):
def _read(self, filename: str) -> Iterable[Instance]:
for i in range(NUM_INSTANCES):
yield Instance({"index": LabelField(i, skip_indexing=True)})
reader = FakeDatasetReader(lazy=lazy)
dataset = reader.read("blah")
loader = PyTorchDataLoader(dataset, batch_size=BATCH_SIZE, batches_per_epoch=BATCHES_PER_EPOCH)
epoch_batches = []
for epoch in range(EPOCHS):
batches = []
for batch in loader:
instances = []
for index in batch["index"]:
instances.append(index)
batches.append(instances)
epoch_batches.append(batches)
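    # The loader keeps its position across epochs: each epoch consumes
    # BATCHES_PER_EPOCH * BATCH_SIZE = 6 instances and wraps around after instance 19.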
assert epoch_batches == [
# Epoch 0.
[[0, 1], [2, 3], [4, 5]],
# Epoch 1.
[[6, 7], [8, 9], [10, 11]],
# Epoch 2.
[[12, 13], [14, 15], [16, 17]],
# Epoch 3.
[[18, 19], [0, 1], [2, 3]],
]
| allennlp-master | tests/data/dataloader_test.py |
import spacy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token, SpacyTokenizer
class TestSpacyTokenizer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.word_tokenizer = SpacyTokenizer()
def test_tokenize_handles_complex_punctuation(self):
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = [
"this",
"(",
"sentence",
")",
"has",
"'",
"crazy",
"'",
'"',
"punctuation",
'"',
".",
]
tokens = self.word_tokenizer.tokenize(sentence)
token_text = [t.text for t in tokens]
assert token_text == expected_tokens
for token in tokens:
start = token.idx
end = start + len(token.text)
assert sentence[start:end] == token.text
def test_tokenize_handles_contraction(self):
# note that "would've" is kept together, while "ain't" is not.
sentence = "it ain't joe's problem; would been yesterday"
expected_tokens = [
"it",
"ai",
"n't",
"joe",
"'s",
"problem",
";",
"would",
"been",
"yesterday",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_multiple_contraction(self):
sentence = "wouldn't've"
expected_tokens = ["would", "n't", "'ve"]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_final_apostrophe(self):
sentence = "the jones' house"
expected_tokens = ["the", "jones", "'", "house"]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_removes_whitespace_tokens(self):
sentence = "the\n jones' house \x0b 55"
expected_tokens = ["the", "jones", "'", "house", "55"]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_special_cases(self):
# note that the etc. doesn't quite work --- we can special case this if we want.
sentence = "Mr. and Mrs. Jones, etc., went to, e.g., the store"
expected_tokens = [
"Mr.",
"and",
"Mrs.",
"Jones",
",",
"etc",
".",
",",
"went",
"to",
",",
"e.g.",
",",
"the",
"store",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_batch_tokenization(self):
sentences = [
"This is a sentence",
"This isn't a sentence.",
"This is the 3rd sentence." "Here's the 'fourth' sentence.",
]
batch_split = self.word_tokenizer.batch_tokenize(sentences)
separately_split = [self.word_tokenizer.tokenize(sentence) for sentence in sentences]
assert len(batch_split) == len(separately_split)
for batch_sentence, separate_sentence in zip(batch_split, separately_split):
assert len(batch_sentence) == len(separate_sentence)
for batch_word, separate_word in zip(batch_sentence, separate_sentence):
assert batch_word.text == separate_word.text
def test_keep_spacy_tokens(self):
word_tokenizer = SpacyTokenizer()
sentence = "This should be an allennlp Token"
tokens = word_tokenizer.tokenize(sentence)
assert tokens
assert all(isinstance(token, Token) for token in tokens)
word_tokenizer = SpacyTokenizer(keep_spacy_tokens=True)
sentence = "This should be a spacy Token"
tokens = word_tokenizer.tokenize(sentence)
assert tokens
assert all(isinstance(token, spacy.tokens.Token) for token in tokens)
| allennlp-master | tests/data/tokenizers/spacy_tokenizer_test.py |
from typing import Iterable, List
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerTokenizer(AllenNlpTestCase):
def test_splits_roberta(self):
tokenizer = PretrainedTransformerTokenizer("roberta-base")
sentence = "A, <mask> AllenNLP sentence."
expected_tokens = [
"<s>",
"A",
",",
"<mask>",
"ĠAllen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_splits_cased_bert(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_splits_uncased_bert(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_splits_reformer_small(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"▁A",
",",
"▁",
"<unk>",
"M",
"A",
"S",
"K",
"<unk>",
"▁A",
"ll",
"en",
"N",
"L",
"P",
"▁s",
"ent",
"en",
"ce",
".",
]
tokenizer = PretrainedTransformerTokenizer("google/reformer-crime-and-punishment")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_token_idx_bert_uncased(self):
sentence = "A, naïve [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"naive", # BERT normalizes this away
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
expected_idxs = [None, 0, 1, 3, 9, 16, 21, 23, 25, 33, None]
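        # Each idx is the wordpiece's character offset in the original sentence; special
        # tokens like [CLS] and [SEP] have no source position, hence None.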
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokenized = tokenizer.tokenize(sentence)
tokens = [t.text for t in tokenized]
assert tokens == expected_tokens
idxs = [t.idx for t in tokenized]
assert idxs == expected_idxs
def test_token_idx_bert_cased(self):
sentence = "A, naïve [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"na",
"##ï",
"##ve",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
expected_idxs = [None, 0, 1, 3, 5, 6, 9, 16, 21, 23, 25, 33, None]
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
tokenized = tokenizer.tokenize(sentence)
tokens = [t.text for t in tokenized]
assert tokens == expected_tokens
idxs = [t.idx for t in tokenized]
assert idxs == expected_idxs
def test_max_length(self):
tokenizer = PretrainedTransformerTokenizer(
"bert-base-cased", max_length=10, add_special_tokens=False
)
tokens = tokenizer.tokenize(
"hi there, this should be at least 10 tokens, but some will be truncated"
)
assert len(tokens) == 10
def test_no_max_length(self):
tokenizer = PretrainedTransformerTokenizer(
"bert-base-cased", max_length=None, add_special_tokens=False
)
# Even though the bert model has a max input length of 512, when we tokenize
# with `max_length = None`, we should not get any truncation.
tokens = tokenizer.tokenize(" ".join(["a"] * 550))
assert len(tokens) == 550
def test_token_idx_roberta(self):
sentence = "A, naïve <mask> AllenNLP sentence."
expected_tokens = [
"<s>",
"A",
",",
"Ġnaïve", # RoBERTa mangles this. Or maybe it "encodes"?
"<mask>",
"ĠAllen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
]
expected_idxs = [None, 0, 1, 3, 9, 16, 21, 22, 25, 33, None]
tokenizer = PretrainedTransformerTokenizer("roberta-base")
tokenized = tokenizer.tokenize(sentence)
tokens = [t.text for t in tokenized]
assert tokens == expected_tokens
idxs = [t.idx for t in tokenized]
assert idxs == expected_idxs
def test_token_idx_wikipedia(self):
sentence = (
"Tokyo (東京 Tōkyō, English: /ˈtoʊkioʊ/,[7] Japanese: [toːkʲoː]), officially "
"Tokyo Metropolis (東京都 Tōkyō-to), is one of the 47 prefectures of Japan."
)
for tokenizer_name in ["roberta-base", "bert-base-uncased", "bert-base-cased"]:
tokenizer = PretrainedTransformerTokenizer(tokenizer_name)
tokenized = tokenizer.tokenize(sentence)
assert tokenized[-2].text == "."
assert tokenized[-2].idx == len(sentence) - 1
def test_intra_word_tokenize(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence.".split(" ")
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
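        # Each offset pair is the inclusive wordpiece span for one input word, e.g.
        # "AllenNLP" maps to pieces 4-6 ("Allen", "##NL", "##P") above.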
expected_offsets = [(1, 2), (3, 3), (4, 6), (7, 8)]
tokens, offsets = tokenizer.intra_word_tokenize(sentence)
tokens = [t.text for t in tokens]
assert tokens == expected_tokens
assert offsets == expected_offsets
# sentence pair
sentence_1 = "A, [MASK] AllenNLP sentence.".split(" ")
sentence_2 = "A sentence.".split(" ")
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
"A", # 10
"sentence",
".",
"[SEP]",
]
expected_offsets_a = [(1, 2), (3, 3), (4, 6), (7, 8)]
expected_offsets_b = [(10, 10), (11, 12)]
tokens, offsets_a, offsets_b = tokenizer.intra_word_tokenize_sentence_pair(
sentence_1, sentence_2
)
tokens = [t.text for t in tokens]
assert tokens == expected_tokens
assert offsets_a == expected_offsets_a
assert offsets_b == expected_offsets_b
def test_intra_word_tokenize_whitespaces(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = ["A,", " ", "[MASK]", "AllenNLP", "\u007f", "sentence."]
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
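        # Entries that are only whitespace (or control characters) produce no
        # wordpieces, so their offset is None.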
expected_offsets = [(1, 2), None, (3, 3), (4, 6), None, (7, 8)]
tokens, offsets = tokenizer.intra_word_tokenize(sentence)
tokens = [t.text for t in tokens]
assert tokens == expected_tokens
assert offsets == expected_offsets
def test_special_tokens_added(self):
def get_token_ids(tokens: Iterable[Token]) -> List[int]:
return [t.text_id for t in tokens]
def get_type_ids(tokens: Iterable[Token]) -> List[int]:
return [t.type_id for t in tokens]
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
assert get_token_ids(tokenizer.sequence_pair_start_tokens) == [101]
assert get_token_ids(tokenizer.sequence_pair_mid_tokens) == [102]
assert get_token_ids(tokenizer.sequence_pair_end_tokens) == [102]
assert get_token_ids(tokenizer.single_sequence_start_tokens) == [101]
assert get_token_ids(tokenizer.single_sequence_end_tokens) == [102]
assert get_type_ids(tokenizer.sequence_pair_start_tokens) == [0]
assert tokenizer.sequence_pair_first_token_type_id == 0
assert get_type_ids(tokenizer.sequence_pair_mid_tokens) == [0]
assert tokenizer.sequence_pair_second_token_type_id == 1
assert get_type_ids(tokenizer.sequence_pair_end_tokens) == [1]
assert get_type_ids(tokenizer.single_sequence_start_tokens) == [0]
assert tokenizer.single_sequence_token_type_id == 0
assert get_type_ids(tokenizer.single_sequence_end_tokens) == [0]
tokenizer = PretrainedTransformerTokenizer("xlnet-base-cased")
assert get_token_ids(tokenizer.sequence_pair_start_tokens) == []
assert get_token_ids(tokenizer.sequence_pair_mid_tokens) == [4]
assert get_token_ids(tokenizer.sequence_pair_end_tokens) == [4, 3]
assert get_token_ids(tokenizer.single_sequence_start_tokens) == []
assert get_token_ids(tokenizer.single_sequence_end_tokens) == [4, 3]
assert get_type_ids(tokenizer.sequence_pair_start_tokens) == []
assert tokenizer.sequence_pair_first_token_type_id == 0
assert get_type_ids(tokenizer.sequence_pair_mid_tokens) == [0]
assert tokenizer.sequence_pair_second_token_type_id == 1
assert get_type_ids(tokenizer.sequence_pair_end_tokens) == [1, 2]
assert get_type_ids(tokenizer.single_sequence_start_tokens) == []
assert tokenizer.single_sequence_token_type_id == 0
assert get_type_ids(tokenizer.single_sequence_end_tokens) == [0, 2]
def test_tokenizer_kwargs_default(self):
text = "Hello there! General Kenobi."
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
original_tokens = [
"[CLS]",
"Hello",
"there",
"!",
"General",
"Ken",
"##ob",
"##i",
".",
"[SEP]",
]
tokenized = [token.text for token in tokenizer.tokenize(text)]
assert tokenized == original_tokens
def test_from_params_kwargs(self):
PretrainedTransformerTokenizer.from_params(
Params({"model_name": "bert-base-uncased", "tokenizer_kwargs": {"max_len": 10}})
)
| allennlp-master | tests/data/tokenizers/pretrained_transformer_tokenizer_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
class TestSentenceSplitter(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.dep_parse_splitter = SpacySentenceSplitter(rule_based=False)
self.rule_based_splitter = SpacySentenceSplitter(rule_based=True)
def test_rule_based_splitter_passes_through_correctly(self):
text = "This is the first sentence. This is the second sentence! "
tokens = self.rule_based_splitter.split_sentences(text)
expected_tokens = ["This is the first sentence.", "This is the second sentence!"]
assert tokens == expected_tokens
def test_dep_parse_splitter_passes_through_correctly(self):
text = "This is the first sentence. This is the second sentence! "
tokens = self.dep_parse_splitter.split_sentences(text)
expected_tokens = ["This is the first sentence.", "This is the second sentence!"]
assert tokens == expected_tokens
def test_batch_rule_based_sentence_splitting(self):
text = [
"This is a sentence. This is a second sentence.",
"This isn't a sentence. This is a second sentence! This is a third sentence.",
]
batch_split = self.rule_based_splitter.batch_split_sentences(text)
separately_split = [self.rule_based_splitter.split_sentences(doc) for doc in text]
assert len(batch_split) == len(separately_split)
for batch_doc, separate_doc in zip(batch_split, separately_split):
assert len(batch_doc) == len(separate_doc)
for batch_sentence, separate_sentence in zip(batch_doc, separate_doc):
assert batch_sentence == separate_sentence
def test_batch_dep_parse_sentence_splitting(self):
text = [
"This is a sentence. This is a second sentence.",
"This isn't a sentence. This is a second sentence! This is a third sentence.",
]
batch_split = self.dep_parse_splitter.batch_split_sentences(text)
separately_split = [self.dep_parse_splitter.split_sentences(doc) for doc in text]
assert len(batch_split) == len(separately_split)
for batch_doc, separate_doc in zip(batch_split, separately_split):
assert len(batch_doc) == len(separate_doc)
for batch_sentence, separate_sentence in zip(batch_doc, separate_doc):
assert batch_sentence == separate_sentence
| allennlp-master | tests/data/tokenizers/sentence_splitter_test.py |
allennlp-master | tests/data/tokenizers/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import CharacterTokenizer
class TestCharacterTokenizer(AllenNlpTestCase):
def test_splits_into_characters(self):
tokenizer = CharacterTokenizer(start_tokens=["<S1>", "<S2>"], end_tokens=["</S2>", "</S1>"])
sentence = "A, small sentence."
tokens = [t.text for t in tokenizer.tokenize(sentence)]
expected_tokens = [
"<S1>",
"<S2>",
"A",
",",
" ",
"s",
"m",
"a",
"l",
"l",
" ",
"s",
"e",
"n",
"t",
"e",
"n",
"c",
"e",
".",
"</S2>",
"</S1>",
]
assert tokens == expected_tokens
def test_batch_tokenization(self):
tokenizer = CharacterTokenizer()
sentences = [
"This is a sentence",
"This isn't a sentence.",
"This is the 3rd sentence." "Here's the 'fourth' sentence.",
]
batch_tokenized = tokenizer.batch_tokenize(sentences)
separately_tokenized = [tokenizer.tokenize(sentence) for sentence in sentences]
assert len(batch_tokenized) == len(separately_tokenized)
for batch_sentence, separate_sentence in zip(batch_tokenized, separately_tokenized):
assert len(batch_sentence) == len(separate_sentence)
for batch_word, separate_word in zip(batch_sentence, separate_sentence):
assert batch_word.text == separate_word.text
def test_handles_byte_encoding(self):
tokenizer = CharacterTokenizer(byte_encoding="utf-8", start_tokens=[259], end_tokens=[260])
word = "åøâáabe"
tokens = [t.text_id for t in tokenizer.tokenize(word)]
# Note that we've added one to the utf-8 encoded bytes, to account for masking.
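        # For example "å" is the UTF-8 byte pair (195, 165), which becomes (196, 166)
        # below, and 259/260 are the start/end ids passed to the tokenizer above.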
expected_tokens = [259, 196, 166, 196, 185, 196, 163, 196, 162, 98, 99, 102, 260]
assert tokens == expected_tokens
| allennlp-master | tests/data/tokenizers/character_tokenizer_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token, LettersDigitsTokenizer
class TestLettersDigitsTokenizer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.word_tokenizer = LettersDigitsTokenizer()
def test_tokenize_handles_complex_punctuation(self):
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = [
"this",
"(",
"sentence",
")",
"has",
"'",
"crazy",
"'",
'"',
"punctuation",
'"',
".",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_unicode_letters(self):
sentence = "HAL9000 and Ångström"
expected_tokens = [
Token("HAL", 0),
Token("9000", 3),
Token("and", 10),
Token("Ångström", 17),
]
tokens = self.word_tokenizer.tokenize(sentence)
assert [t.text for t in tokens] == [t.text for t in expected_tokens]
assert [t.idx for t in tokens] == [t.idx for t in expected_tokens]
def test_tokenize_handles_splits_all_punctuation(self):
sentence = "wouldn't.[have] -3.45(m^2)"
expected_tokens = [
"wouldn",
"'",
"t",
".",
"[",
"have",
"]",
"-",
"3",
".",
"45",
"(",
"m",
"^",
"2",
")",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
| allennlp-master | tests/data/tokenizers/letters_digits_tokenizer_test.py |
from typing import Iterable
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers import DatasetReader, InterleavingDatasetReader
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import SpacyTokenizer
class PlainTextReader(DatasetReader):
def __init__(self):
super().__init__()
self._token_indexers = {"tokens": SingleIdTokenIndexer()}
self._tokenizer = SpacyTokenizer()
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path) as input_file:
for line in input_file:
yield self.text_to_instance(line)
def text_to_instance(self, line: str) -> Instance: # type: ignore
tokens = self._tokenizer.tokenize(line)
return Instance({"line": TextField(tokens, self._token_indexers)})
class TestInterleavingDatasetReader(AllenNlpTestCase):
def test_round_robin(self):
readers = {"a": PlainTextReader(), "b": PlainTextReader(), "c": PlainTextReader()}
reader = InterleavingDatasetReader(readers)
data_dir = self.FIXTURES_ROOT / "data"
file_path = f"""{{
"a": "{data_dir / 'babi.txt'}",
"b": "{data_dir / 'conll2003.txt'}",
"c": "{data_dir / 'conll2003.txt'}"
}}"""
instances = list(reader.read(file_path))
first_three_keys = {instance.fields["dataset"].metadata for instance in instances[:3]}
assert first_three_keys == {"a", "b", "c"}
next_three_keys = {instance.fields["dataset"].metadata for instance in instances[3:6]}
assert next_three_keys == {"a", "b", "c"}
def test_all_at_once(self):
readers = {"f": PlainTextReader(), "g": PlainTextReader(), "h": PlainTextReader()}
reader = InterleavingDatasetReader(
readers, dataset_field_name="source", scheme="all_at_once"
)
data_dir = self.FIXTURES_ROOT / "data"
file_path = f"""{{
"f": "{data_dir / 'babi.txt'}",
"g": "{data_dir / 'conll2003.txt'}",
"h": "{data_dir / 'conll2003.txt'}"
}}"""
buckets = []
last_source = None
# Fill up a bucket until the source changes, then start a new one
for instance in reader.read(file_path):
source = instance.fields["source"].metadata
if source != last_source:
buckets.append([])
last_source = source
buckets[-1].append(instance)
# should be in 3 buckets
assert len(buckets) == 3
| allennlp-master | tests/data/dataset_readers/interleaving_dataset_reader_test.py |
import pytest
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestSequenceTaggingDatasetReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_default_format(self, lazy):
reader = SequenceTaggingDatasetReader(lazy=lazy)
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
instances = ensure_list(instances)
assert len(instances) == 4
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[3].fields
assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
def test_brown_corpus_format(self):
reader = SequenceTaggingDatasetReader(word_tag_delimiter="/")
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "brown_corpus.txt")
instances = ensure_list(instances)
assert len(instances) == 4
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[3].fields
assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
| allennlp-master | tests/data/dataset_readers/sequence_tagging_test.py |
from typing import Iterable, List
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import ensure_list
class LazyDatasetReader(DatasetReader):
def __init__(self, instances: List[Instance], lazy: bool) -> None:
super().__init__()
self.lazy = lazy
self._instances = instances
self.num_reads = 0
def _read(self, _: str) -> Iterable[Instance]:
self.num_reads += 1
return (instance for instance in self._instances)
class TestLazyDatasetReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
token_indexer = {"tokens": SingleIdTokenIndexer()}
field1 = TextField([Token(t) for t in ["this", "is", "a", "sentence", "."]], token_indexer)
field2 = TextField(
[Token(t) for t in ["this", "is", "a", "different", "sentence", "."]], token_indexer
)
field3 = TextField([Token(t) for t in ["here", "is", "a", "sentence", "."]], token_indexer)
field4 = TextField([Token(t) for t in ["this", "is", "short"]], token_indexer)
self.instances = [
Instance({"text1": field1, "text2": field2}),
Instance({"text1": field3, "text2": field4}),
]
def test_lazy(self):
reader = LazyDatasetReader(self.instances, lazy=True)
assert reader.num_reads == 0
instances = reader.read("path/to/file")
for _ in range(10):
_instances = (i for i in instances)
assert ensure_list(_instances) == self.instances
assert reader.num_reads == 10
def test_non_lazy(self):
reader = LazyDatasetReader(self.instances, lazy=False)
assert reader.num_reads == 0
instances = reader.read("path/to/file")
for _ in range(10):
_instances = (i for i in instances)
assert ensure_list(_instances) == self.instances
assert reader.num_reads == 1
| allennlp-master | tests/data/dataset_readers/lazy_dataset_reader_test.py |
from collections import deque
import os
import shutil
from typing import Optional, NamedTuple, List
from filelock import FileLock
import pytest
import torch.distributed as dist
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common import util as common_util
from allennlp.common.checks import ConfigurationError
from allennlp.data import Instance
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.data.dataset_readers import (
dataset_reader,
DatasetReader,
TextClassificationJsonReader,
)
from allennlp.data.dataset_readers.dataset_reader import AllennlpLazyDataset
from allennlp.data.fields import LabelField
def mock_collate_fn(item):
return item[0]
class TestDatasetReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.cache_directory = str(AllenNlpTestCase.FIXTURES_ROOT / "data_cache" / "with_prefix")
def teardown_method(self):
super().teardown_method()
if os.path.exists(self.cache_directory):
shutil.rmtree(self.cache_directory)
def test_lazy_dataset_can_be_iterated_through_multiple_times(self):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=True)
instances = reader.read(data_file)
assert isinstance(instances, AllennlpLazyDataset)
first_pass_instances = list(instances)
assert len(first_pass_instances) > 2
second_pass_instances = list(instances)
assert first_pass_instances == second_pass_instances
def test_read_only_creates_cache_file_once(self):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(cache_directory=self.cache_directory)
cache_file = reader._get_cache_location_for_file_path(str(data_file))
# The first read will create the cache.
reader.read(data_file)
assert os.path.exists(cache_file)
with open(cache_file, "r") as in_file:
cache_contents = in_file.read()
# The second and all subsequent reads should _use_ the cache, not modify it. I looked
# into checking file modification times, but this test will probably be faster than the
# granularity of `os.path.getmtime()` (which only returns values in seconds).
reader.read(data_file)
reader.read(data_file)
reader.read(data_file)
reader.read(data_file)
with open(cache_file, "r") as in_file:
final_cache_contents = in_file.read()
assert cache_contents == final_cache_contents
@pytest.mark.parametrize("lazy", (True, False))
def test_caching_works_with_lazy_reading(self, caplog, lazy: bool):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
snli_copy_file = str(data_file) + ".copy"
shutil.copyfile(data_file, snli_copy_file)
reader = TextClassificationJsonReader(lazy=lazy, cache_directory=self.cache_directory)
cache_file = reader._get_cache_location_for_file_path(snli_copy_file)
# The call to read() will give us an _iterator_. We'll iterate over it multiple times,
# and the caching behavior should change as we go.
assert not os.path.exists(cache_file)
instances = reader.read(snli_copy_file)
# The first iteration will create the cache
first_pass_instances = []
for instance in instances:
first_pass_instances.append(instance)
assert "Caching instances to temp file" in " ".join([rec.message for rec in caplog.records])
assert os.path.exists(cache_file)
# Now we _remove_ the data file, to be sure we're reading from the cache.
os.remove(snli_copy_file)
caplog.clear()
instances = reader.read(snli_copy_file)
second_pass_instances = []
for instance in instances:
second_pass_instances.append(instance)
assert "Reading instances from cache" in " ".join([rec.message for rec in caplog.records])
# We should get the same instances both times.
assert len(first_pass_instances) == len(second_pass_instances)
for instance, cached_instance in zip(first_pass_instances, second_pass_instances):
assert instance.fields == cached_instance.fields
# And just to be super paranoid, in case the second pass somehow bypassed the cache
# because of a bug that's hard to detect, we'll read the
# instances from the cache with a non-lazy iterator and make sure they're the same.
reader = TextClassificationJsonReader(lazy=False, cache_directory=self.cache_directory)
cached_instances = reader.read(snli_copy_file)
assert len(first_pass_instances) == len(cached_instances)
for instance, cached_instance in zip(first_pass_instances, cached_instances):
assert instance.fields == cached_instance.fields
@pytest.mark.parametrize("lazy", (True, False))
def test_caching_skipped_when_lock_not_acquired(self, caplog, lazy: bool):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=lazy, cache_directory=self.cache_directory)
reader.CACHE_FILE_LOCK_TIMEOUT = 1
cache_file = reader._get_cache_location_for_file_path(str(data_file))
with FileLock(cache_file + ".lock"):
# Right now we hold the lock on the cache, so the reader shouldn't
# be able to write to it. It will wait for 1 second (because that's what
# we set the timeout to be), and then just read the instances as normal.
caplog.clear()
instances = list(reader.read(data_file))
assert "Failed to acquire lock" in caplog.text
assert instances
# We didn't write to the cache because we couldn't acquire the file lock.
assert not os.path.exists(cache_file)
# Now we'll write to the cache and then try the same thing again, this
# time making sure that we can still successfully read without the cache
# when the lock can't be acquired.
deque(reader.read(data_file), maxlen=1)
assert os.path.exists(cache_file)
with FileLock(cache_file + ".lock"):
# Right now we hold the lock on the cache, so the reader shouldn't
# be able to write to it. It will wait for 1 second (because that's what
# we set the timeout to be), and then just read the instances as normal.
caplog.clear()
instances = list(reader.read(data_file))
assert "Failed to acquire lock" in caplog.text
assert instances
@pytest.mark.parametrize("lazy", (True, False))
def test_caching_skipped_with_distributed_training(self, caplog, monkeypatch, lazy):
monkeypatch.setattr(common_util, "is_distributed", lambda: True)
monkeypatch.setattr(dist, "get_rank", lambda: 0)
monkeypatch.setattr(dist, "get_world_size", lambda: 1)
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=lazy, cache_directory=self.cache_directory)
cache_file = reader._get_cache_location_for_file_path(str(data_file))
deque(reader.read(data_file), maxlen=1)
assert not os.path.exists(cache_file)
assert "Can't cache data instances when there are multiple processes" in caplog.text
def test_caching_with_lazy_reader_in_multi_process_loader(self):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=True, cache_directory=self.cache_directory)
deque(
PyTorchDataLoader(reader.read(data_file), collate_fn=mock_collate_fn, num_workers=2),
maxlen=0,
)
# We shouldn't write to the cache when the data is being loaded from multiple
# processes.
cache_file = reader._get_cache_location_for_file_path(str(data_file))
assert not os.path.exists(cache_file)
# But try again from the main process and we should see the cache file.
instances = list(reader.read(data_file))
assert instances
assert os.path.exists(cache_file)
# Reading again from a multi-process loader should read from the cache.
new_instances = list(
PyTorchDataLoader(reader.read(data_file), collate_fn=mock_collate_fn, num_workers=2)
)
assert len(instances) == len(new_instances)
@pytest.mark.parametrize("lazy", (True, False))
def test_max_instances(self, lazy):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(max_instances=2, lazy=lazy)
instances = reader.read(data_file)
instance_count = sum(1 for _ in instances)
assert instance_count == 2
@pytest.mark.parametrize("num_workers", (0, 1, 2))
def test_max_instances_with_multi_process_loader(self, num_workers):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(max_instances=2, lazy=True)
instances = list(
PyTorchDataLoader(
reader.read(data_file), collate_fn=mock_collate_fn, num_workers=num_workers
)
)
assert len(instances) == 2
@pytest.mark.parametrize("lazy", (True, False))
def test_cached_max_instances(self, lazy):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
# If we try reading with max instances, it shouldn't write to the cache.
reader = TextClassificationJsonReader(
cache_directory=self.cache_directory, lazy=lazy, max_instances=2
)
instances = list(reader.read(data_file))
assert len(instances) == 2
cache_file = reader._get_cache_location_for_file_path(str(data_file))
assert not os.path.exists(cache_file)
# Now reading again with no max_instances specified should create the cache.
reader = TextClassificationJsonReader(cache_directory=self.cache_directory, lazy=lazy)
instances = list(reader.read(data_file))
assert len(instances) > 2
assert os.path.exists(cache_file)
# The second read should only return two instances, even though it's from the cache.
reader = TextClassificationJsonReader(
cache_directory=self.cache_directory, max_instances=2, lazy=lazy
)
instances = list(reader.read(data_file))
assert len(instances) == 2
class MockWorkerInfo(NamedTuple):
id: int
num_workers: int
class MockDatasetReader(DatasetReader):
def _read(self, file_path):
for i in range(10):
yield self.text_to_instance(i)
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
@pytest.mark.parametrize(
"node_rank, world_size, worker_id, num_workers, max_instances, expected_result",
[
(None, None, None, None, None, list(range(10))),
(None, None, None, None, 5, list(range(5))),
(None, None, None, None, 12, list(range(10))),
(None, None, 0, 1, None, list(range(10))),
(None, None, 0, 2, None, [0, 2, 4, 6, 8]),
(None, None, 1, 2, None, [1, 3, 5, 7, 9]),
(None, None, 0, 2, 5, [0, 2, 4]),
(None, None, 1, 2, 5, [1, 3]),
(0, 1, None, None, None, list(range(10))),
(0, 2, None, None, None, [0, 2, 4, 6, 8]),
(1, 2, None, None, None, [1, 3, 5, 7, 9]),
(0, 2, None, None, 5, [0, 2, 4]),
(1, 2, None, None, 5, [1, 3]),
(0, 2, 0, 2, None, [0, 4, 8]),
(0, 2, 1, 2, None, [1, 5, 9]),
(1, 2, 0, 2, None, [2, 6]),
(1, 2, 1, 2, None, [3, 7]),
(0, 2, 0, 2, 5, [0, 4]),
],
)
def test_instance_slicing(
monkeypatch,
node_rank: Optional[int],
world_size: Optional[int],
worker_id: Optional[int],
num_workers: Optional[int],
max_instances: Optional[int],
expected_result: List[int],
):
if node_rank is not None and world_size is not None:
monkeypatch.setattr(common_util, "is_distributed", lambda: True)
monkeypatch.setattr(dist, "get_rank", lambda: node_rank)
monkeypatch.setattr(dist, "get_world_size", lambda: world_size)
if worker_id is not None and num_workers is not None:
monkeypatch.setattr(
dataset_reader, "get_worker_info", lambda: MockWorkerInfo(worker_id, num_workers)
)
reader = MockDatasetReader(max_instances=max_instances)
    result = [x["index"].label for x in reader.read("the-path-doesnt-matter")]  # type: ignore
assert result == expected_result
class BadLazyReader(DatasetReader):
def _read(self, file_path):
return [self.text_to_instance(i) for i in range(10)]
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
def test_config_error_when_lazy_reader_returns_list():
reader = BadLazyReader(lazy=True)
with pytest.raises(ConfigurationError, match="must return a generator"):
deque(reader.read("path"), maxlen=0)
class BadReaderReadsNothing(DatasetReader):
def _read(self, file_path):
return []
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
def test_config_error_when_reader_returns_no_instances():
reader = BadReaderReadsNothing()
with pytest.raises(ConfigurationError, match="No instances were read"):
deque(reader.read("path"), maxlen=0)
class BadReaderForgetsToSetLazy(DatasetReader):
def __init__(self):
pass
def _read(self, file_path):
for i in range(10):
yield self.text_to_instance(i)
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
def test_warning_when_reader_has_no_lazy_set():
with pytest.warns(UserWarning, match="DatasetReader.lazy is not set"):
reader = BadReaderForgetsToSetLazy()
reader.read("path")
| allennlp-master | tests/data/dataset_readers/dataset_reader_test.py |
allennlp-master | tests/data/dataset_readers/__init__.py |
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.data.dataset_readers import BabiReader
from allennlp.common.testing import AllenNlpTestCase
class TestBAbIReader:
@pytest.mark.parametrize(
"keep_sentences, lazy", [(False, False), (False, True), (True, False), (True, True)]
)
def test_read_from_file(self, keep_sentences, lazy):
reader = BabiReader(keep_sentences=keep_sentences, lazy=lazy)
instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "babi.txt"))
assert len(instances) == 8
if keep_sentences:
assert [t.text for t in instances[0].fields["context"][3].tokens[3:]] == [
"of",
"wolves",
".",
]
assert [t.sequence_index for t in instances[0].fields["supports"]] == [0, 1]
else:
assert [t.text for t in instances[0].fields["context"].tokens[7:9]] == ["afraid", "of"]
def test_can_build_from_params(self):
reader = BabiReader.from_params(Params({"keep_sentences": True}))
assert reader._keep_sentences
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
| allennlp-master | tests/data/dataset_readers/babi_reader_test.py |
import pytest
from typing import List
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
from allennlp.common.util import get_spacy_model
class TestTextClassificationJsonReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_set_skip_indexing_true(self, lazy):
reader = TextClassificationJsonReader(lazy=lazy, skip_label_indexing=True)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "integer_labels.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
instance1 = {"tokens": ["This", "text", "has", "label", "0"], "label": 0}
instance2 = {"tokens": ["This", "text", "has", "label", "1"], "label": 1}
assert len(instances) == 2
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
with pytest.raises(ValueError) as exec_info:
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
ensure_list(reader.read(ag_path))
assert str(exec_info.value) == "Labels must be integers if skip_label_indexing is True."
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file_ag_news_corpus(self, lazy):
reader = TextClassificationJsonReader(lazy=lazy)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "ag_news_corpus.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
instance1 = {
"tokens": [
"Memphis",
"Rout",
"Still",
"Stings",
"for",
"No",
".",
"14",
"Louisville",
";",
"Coach",
"Petrino",
"Vows",
"to",
"Have",
"Team",
"Better",
"Prepared",
".",
"NASHVILLE",
",",
"Tenn.",
"Nov",
"3",
",",
"2004",
"-",
"Louisville",
"#",
"39;s",
"30-point",
"loss",
"at",
"home",
"to",
"Memphis",
"last",
"season",
"is",
"still",
"a",
"painful",
"memory",
"for",
"the",
"Cardinals",
".",
],
"label": "2",
}
instance2 = {
"tokens": [
"AP",
"-",
"Eli",
"Manning",
"has",
"replaced",
"Kurt",
"Warner",
"as",
"the",
"New",
"York",
"Giants",
"'",
"starting",
"quarterback",
".",
],
"label": "2",
}
instance3 = {
"tokens": [
"A",
"conference",
"dedicated",
"to",
"online",
"journalism",
"explores",
"the",
"effect",
"blogs",
"have",
"on",
"news",
"reporting",
".",
"Some",
"say",
"they",
"draw",
"attention",
"to",
"under",
"-",
"reported",
"stories",
".",
"Others",
"struggle",
"to",
"establish",
"the",
"credibility",
"enjoyed",
"by",
"professionals",
".",
],
"label": "4",
}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file_ag_news_corpus_and_truncates_properly(self, lazy):
reader = TextClassificationJsonReader(lazy=lazy, max_sequence_length=5)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "ag_news_corpus.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
instance1 = {"tokens": ["Memphis", "Rout", "Still", "Stings", "for"], "label": "2"}
instance2 = {"tokens": ["AP", "-", "Eli", "Manning", "has"], "label": "2"}
instance3 = {"tokens": ["A", "conference", "dedicated", "to", "online"], "label": "4"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
@pytest.mark.parametrize("max_sequence_length", (None, 5))
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file_ag_news_corpus_and_segments_sentences_properly(
self, lazy, max_sequence_length
):
reader = TextClassificationJsonReader(
lazy=lazy, segment_sentences=True, max_sequence_length=max_sequence_length
)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "ag_news_corpus.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
splitter = SpacySentenceSplitter()
spacy_tokenizer = get_spacy_model("en_core_web_sm", False, False, False)
text1 = (
"Memphis Rout Still Stings for No. 14 Louisville; Coach "
"Petrino Vows to Have Team Better Prepared. NASHVILLE, "
"Tenn. Nov 3, 2004 - Louisville #39;s 30-point loss "
"at home to Memphis last season is still a painful memory "
"for the Cardinals."
)
instance1 = {"text": text1, "label": "2"}
text2 = (
"AP - Eli Manning has replaced Kurt Warner as the New York"
" Giants' starting quarterback."
)
instance2 = {"text": text2, "label": "2"}
text3 = (
"A conference dedicated to online journalism explores the "
"effect blogs have on news reporting. Some say they draw "
"attention to under-reported stories. Others struggle to "
"establish the credibility enjoyed by professionals."
)
instance3 = {"text": text3, "label": "4"}
for instance in [instance1, instance2, instance3]:
sentences = splitter.split_sentences(instance["text"])
tokenized_sentences: List[List[str]] = []
for sentence in sentences:
tokens = [token.text for token in spacy_tokenizer(sentence)]
if max_sequence_length:
tokens = tokens[:max_sequence_length]
tokenized_sentences.append(tokens)
instance["tokens"] = tokenized_sentences
assert len(instances) == 3
fields = instances[0].fields
text = [[token.text for token in sentence.tokens] for sentence in fields["tokens"]]
assert text == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
text = [[token.text for token in sentence.tokens] for sentence in fields["tokens"]]
assert text == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
text = [[token.text for token in sentence.tokens] for sentence in fields["tokens"]]
assert text == instance3["tokens"]
assert fields["label"].label == instance3["label"]
| allennlp-master | tests/data/dataset_readers/text_classification_json_test.py |
import glob
import os
import tarfile
from collections import Counter
from typing import Tuple
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers import (
SequenceTaggingDatasetReader,
ShardedDatasetReader,
DatasetReader,
)
from allennlp.data.instance import Instance
def fingerprint(instance: Instance) -> Tuple[str, ...]:
"""
Get a hashable representation of a sequence tagging instance
that can be put in a Counter.
"""
text_tuple = tuple(t.text for t in instance.fields["tokens"].tokens) # type: ignore
labels_tuple = tuple(instance.fields["tags"].labels) # type: ignore
return text_tuple + labels_tuple
def test_exception_raised_when_base_reader_implements_sharding():
class ManuallyShardedBaseReader(DatasetReader):
def __init__(self, **kwargs):
super().__init__(manual_distributed_sharding=True, **kwargs)
def _read(self, file_path: str):
pass
def text_to_instance(self, text: str): # type: ignore
pass
with pytest.raises(ValueError, match="should not implement manual distributed sharding"):
ShardedDatasetReader(ManuallyShardedBaseReader())
class TestShardedDatasetReader(AllenNlpTestCase):
def setup_method(self) -> None:
super().setup_method()
# use SequenceTaggingDatasetReader as the base reader
self.base_reader = SequenceTaggingDatasetReader(lazy=True)
base_file_path = AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
# Make 100 copies of the data
raw_data = open(base_file_path).read()
for i in range(100):
file_path = self.TEST_DIR / f"identical_{i}.tsv"
with open(file_path, "w") as f:
f.write(raw_data)
self.identical_files_glob = str(self.TEST_DIR / "identical_*.tsv")
# Also create an archive with all of these files to ensure that we can
# pass the archive directory.
current_dir = os.getcwd()
os.chdir(self.TEST_DIR)
self.archive_filename = self.TEST_DIR / "all_data.tar.gz"
with tarfile.open(self.archive_filename, "w:gz") as archive:
for file_path in glob.glob("identical_*.tsv"):
archive.add(file_path)
os.chdir(current_dir)
self.reader = ShardedDatasetReader(base_reader=self.base_reader)
def read_and_check_instances(self, filepath: str):
all_instances = []
for instance in self.reader.read(filepath):
all_instances.append(instance)
# 100 files * 4 sentences / file
assert len(all_instances) == 100 * 4
counts = Counter(fingerprint(instance) for instance in all_instances)
# should have the exact same data 100 times
assert len(counts) == 4
assert counts[("cats", "are", "animals", ".", "N", "V", "N", "N")] == 100
assert counts[("dogs", "are", "animals", ".", "N", "V", "N", "N")] == 100
assert counts[("snakes", "are", "animals", ".", "N", "V", "N", "N")] == 100
assert counts[("birds", "are", "animals", ".", "N", "V", "N", "N")] == 100
def test_sharded_read_glob(self):
self.read_and_check_instances(self.identical_files_glob)
def test_sharded_read_archive(self):
self.read_and_check_instances(str(self.archive_filename))
def test_attributes_inheritance(self):
# current reader has lazy set to true
base_reader = SequenceTaggingDatasetReader(lazy=True)
reader = ShardedDatasetReader(base_reader=base_reader)
assert (
reader.lazy
), "The ShardedDatasetReader didn't inherit the 'lazy' attribute from base_reader"
def test_set_attributes_main(self):
base_reader = SequenceTaggingDatasetReader(lazy=True)
reader = ShardedDatasetReader(base_reader=base_reader, lazy=False)
assert (
not reader.lazy
), "The ShardedDatasetReader inherited the 'lazy' attribute from base_reader. It should be False"
| allennlp-master | tests/data/dataset_readers/sharded_dataset_reader_test.py |
from typing import List
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers.dataset_utils import span_utils
from allennlp.data.tokenizers import Token, SpacyTokenizer
class SpanUtilsTest(AllenNlpTestCase):
def test_bio_tags_to_spans_extracts_correct_spans(self):
tag_sequence = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "B-ARG1", "B-ARG2"]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG1", (1, 2)),
("ARG2", (4, 5)),
("ARG1", (6, 6)),
("ARG2", (7, 7)),
}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "U-ARG1", "U-ARG2"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bio_tags_to_spans(tag_sequence)
# Check that invalid BIO sequences are also handled as spans.
tag_sequence = [
"O",
"B-ARG1",
"I-ARG1",
"O",
"I-ARG1",
"B-ARG2",
"I-ARG2",
"B-ARG1",
"I-ARG2",
"I-ARG2",
]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG1", (1, 2)),
("ARG2", (5, 6)),
("ARG1", (7, 7)),
("ARG1", (4, 4)),
("ARG2", (8, 9)),
}
def test_bio_tags_to_spans_extracts_correct_spans_without_labels(self):
tag_sequence = ["O", "B", "I", "O", "B", "I", "B", "B"]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {("", (1, 2)), ("", (4, 5)), ("", (6, 6)), ("", (7, 7))}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B", "I", "O", "B", "I", "U", "U"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bio_tags_to_spans(tag_sequence)
# Check that invalid BIO sequences are also handled as spans.
tag_sequence = ["O", "B", "I", "O", "I", "B", "I", "B", "I", "I"]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {("", (1, 2)), ("", (4, 4)), ("", (5, 6)), ("", (7, 9))}
def test_bio_tags_to_spans_ignores_specified_tags(self):
tag_sequence = [
"B-V",
"I-V",
"O",
"B-ARG1",
"I-ARG1",
"O",
"B-ARG2",
"I-ARG2",
"B-ARG1",
"B-ARG2",
]
spans = span_utils.bio_tags_to_spans(tag_sequence, ["ARG1", "V"])
assert set(spans) == {("ARG2", (6, 7)), ("ARG2", (9, 9))}
def test_iob1_tags_to_spans_extracts_correct_spans_without_labels(self):
tag_sequence = ["I", "B", "I", "O", "B", "I", "B", "B"]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2)), ("", (4, 5)), ("", (6, 6)), ("", (7, 7))}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B", "I", "O", "B", "I", "U", "U"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.iob1_tags_to_spans(tag_sequence)
# Check that invalid IOB1 sequences are also handled as spans.
tag_sequence = ["O", "B", "I", "O", "I", "B", "I", "B", "I", "I"]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {("", (1, 2)), ("", (4, 4)), ("", (5, 6)), ("", (7, 9))}
def test_iob1_tags_to_spans_extracts_correct_spans(self):
tag_sequence = ["I-ARG2", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "B-ARG1", "B-ARG2"]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG2", (0, 0)),
("ARG1", (1, 2)),
("ARG2", (4, 5)),
("ARG1", (6, 6)),
("ARG2", (7, 7)),
}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "U-ARG1", "U-ARG2"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.iob1_tags_to_spans(tag_sequence)
# Check that invalid IOB1 sequences are also handled as spans.
tag_sequence = [
"O",
"B-ARG1",
"I-ARG1",
"O",
"I-ARG1",
"B-ARG2",
"I-ARG2",
"B-ARG1",
"I-ARG2",
"I-ARG2",
]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG1", (1, 2)),
("ARG1", (4, 4)),
("ARG2", (5, 6)),
("ARG1", (7, 7)),
("ARG2", (8, 9)),
}
def test_enumerate_spans_enumerates_all_spans(self):
tokenizer = SpacyTokenizer(pos_tags=True)
sentence = tokenizer.tokenize("This is a sentence.")
spans = span_utils.enumerate_spans(sentence)
assert spans == [
(0, 0),
(0, 1),
(0, 2),
(0, 3),
(0, 4),
(1, 1),
(1, 2),
(1, 3),
(1, 4),
(2, 2),
(2, 3),
(2, 4),
(3, 3),
(3, 4),
(4, 4),
]
spans = span_utils.enumerate_spans(sentence, max_span_width=3, min_span_width=2)
assert spans == [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (2, 4), (3, 4)]
spans = span_utils.enumerate_spans(sentence, max_span_width=3, min_span_width=2, offset=20)
assert spans == [(20, 21), (20, 22), (21, 22), (21, 23), (22, 23), (22, 24), (23, 24)]
def no_prefixed_punctuation(tokens: List[Token]):
# Only include spans which don't start or end with punctuation.
return tokens[0].pos_ != "PUNCT" and tokens[-1].pos_ != "PUNCT"
spans = span_utils.enumerate_spans(
sentence, max_span_width=3, min_span_width=2, filter_function=no_prefixed_punctuation
)
# No longer includes (2, 4) or (3, 4) as these include punctuation
# as their last element.
assert spans == [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]
def test_bioul_tags_to_spans(self):
tag_sequence = ["B-PER", "I-PER", "L-PER", "U-PER", "U-LOC", "O"]
spans = span_utils.bioul_tags_to_spans(tag_sequence)
assert spans == [("PER", (0, 2)), ("PER", (3, 3)), ("LOC", (4, 4))]
tag_sequence = ["B-PER", "I-PER", "O"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bioul_tags_to_spans(tag_sequence)
def test_bioul_tags_to_spans_without_labels(self):
tag_sequence = ["B", "I", "L", "U", "U", "O"]
spans = span_utils.bioul_tags_to_spans(tag_sequence)
assert spans == [("", (0, 2)), ("", (3, 3)), ("", (4, 4))]
tag_sequence = ["B", "I", "O"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bioul_tags_to_spans(tag_sequence)
def test_iob1_to_bioul(self):
tag_sequence = ["I-ORG", "O", "I-MISC", "O"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="IOB1")
assert bioul_sequence == ["U-ORG", "O", "U-MISC", "O"]
tag_sequence = ["O", "I-PER", "B-PER", "I-PER", "I-PER", "B-PER"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="IOB1")
assert bioul_sequence == ["O", "U-PER", "B-PER", "I-PER", "L-PER", "U-PER"]
def test_bio_to_bioul(self):
tag_sequence = ["B-ORG", "O", "B-MISC", "O", "B-MISC", "I-MISC", "I-MISC"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="BIO")
assert bioul_sequence == ["U-ORG", "O", "U-MISC", "O", "B-MISC", "I-MISC", "L-MISC"]
        # Passing a sequence in IOB1 format while specifying BIO encoding should raise an error.
with pytest.raises(span_utils.InvalidTagSequence):
tag_sequence = ["O", "I-PER", "B-PER", "I-PER", "I-PER", "B-PER"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="BIO")
def test_bmes_tags_to_spans_extracts_correct_spans(self):
tag_sequence = ["B-ARG1", "M-ARG1", "E-ARG1", "B-ARG2", "E-ARG2", "S-ARG3"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 2)), ("ARG2", (3, 4)), ("ARG3", (5, 5))}
tag_sequence = ["S-ARG1", "B-ARG2", "E-ARG2", "S-ARG3"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG2", (1, 2)), ("ARG3", (3, 3))}
# Invalid labels.
tag_sequence = ["B-ARG1", "M-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG2", (1, 1))}
tag_sequence = ["B-ARG1", "E-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG2", (1, 1))}
tag_sequence = ["B-ARG1", "M-ARG1", "M-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 1)), ("ARG2", (2, 2))}
tag_sequence = ["B-ARG1", "M-ARG1", "E-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 1)), ("ARG2", (2, 2))}
# Invalid transitions.
tag_sequence = ["B-ARG1", "B-ARG1"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG1", (1, 1))}
tag_sequence = ["B-ARG1", "S-ARG1"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG1", (1, 1))}
def test_bmes_tags_to_spans_extracts_correct_spans_without_labels(self):
# Good transitions.
tag_sequence = ["B", "M", "E", "B", "E", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 2)), ("", (3, 4)), ("", (5, 5))}
tag_sequence = ["S", "B", "E", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2)), ("", (3, 3))}
# Invalid transitions.
tag_sequence = ["B", "B", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2))}
tag_sequence = ["B", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 1))}
tag_sequence = ["M", "B", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2))}
tag_sequence = ["B", "M", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 1)), ("", (2, 2))}
tag_sequence = ["B", "E", "M", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 1)), ("", (2, 3))}
tag_sequence = ["B", "E", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 1)), ("", (2, 2))}
tag_sequence = ["S", "M"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 1))}
tag_sequence = ["S", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 1))}
| allennlp-master | tests/data/dataset_readers/dataset_utils/span_utils_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.token_indexers.spacy_indexer import SpacyTokenIndexer
from allennlp.data.fields.text_field import TextField
from allennlp.common.util import get_spacy_model
from allennlp.data.vocabulary import Vocabulary
class TestSpacyTokenIndexer(AllenNlpTestCase):
def test_as_array_produces_token_array(self):
indexer = SpacyTokenIndexer()
nlp = get_spacy_model("en_core_web_sm", pos_tags=True, parse=False, ner=False)
tokens = [t for t in nlp("This is a sentence.")]
field = TextField(tokens, token_indexers={"spacy": indexer})
vocab = Vocabulary()
field.index(vocab)
# Indexer functionality
array_dict = indexer.tokens_to_indices(tokens, vocab)
assert len(array_dict["tokens"]) == 5
assert len(array_dict["tokens"][0]) == 96
# Check it also works with field
lengths = field.get_padding_lengths()
array_dict = field.as_tensor(lengths)
assert list(array_dict["spacy"]["tokens"].shape) == [5, 96]
| allennlp-master | tests/data/token_indexers/spacy_indexer_test.py |
allennlp-master | tests/data/token_indexers/__init__.py |
|
import numpy as np
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.batch import Batch
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.fields import ListField, TextField
class TestELMoTokenCharactersIndexer(AllenNlpTestCase):
def test_bos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token("<S>")], Vocabulary())
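        # Every word is padded out to 50 character ids. Under ELMo's character
        # mapping, 259 marks the start of a word, 260 the end, and 261 is padding;
        # 257 here encodes the special <S> token's single "character".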
expected_indices = [
259,
257,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
assert indices == {"elmo_tokens": [expected_indices]}
def test_eos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token("</S>")], Vocabulary())
expected_indices = [
259,
258,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
assert indices == {"elmo_tokens": [expected_indices]}
def test_unicode_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token(chr(256) + "t")], Vocabulary())
expected_indices = [
259,
197,
129,
117,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
assert indices == {"elmo_tokens": [expected_indices]}
def test_elmo_as_array_produces_token_sequence(self):
indexer = ELMoTokenCharactersIndexer()
tokens = [Token("Second"), Token(".")]
indices = indexer.tokens_to_indices(tokens, Vocabulary())
padded_tokens = indexer.as_padded_tensor_dict(indices, padding_lengths={"elmo_tokens": 3})
expected_padded_tokens = [
[
259,
84,
102,
100,
112,
111,
101,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
],
[
259,
47,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
]
assert padded_tokens["elmo_tokens"].tolist() == expected_padded_tokens
def test_elmo_indexer_with_additional_tokens(self):
indexer = ELMoTokenCharactersIndexer(tokens_to_add={"<first>": 1})
tokens = [Token("<first>")]
indices = indexer.tokens_to_indices(tokens, Vocabulary())
expected_indices = [
[
259,
2,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
]
assert indices["elmo_tokens"] == expected_indices
def test_elmo_empty_token_list(self):
# Basic test
indexer = ELMoTokenCharactersIndexer()
assert {"elmo_tokens": []} == indexer.get_empty_token_list()
# Real world test
indexer = {"elmo": indexer}
tokens_1 = TextField([Token("Apple")], indexer)
targets_1 = ListField([TextField([Token("Apple")], indexer)])
tokens_2 = TextField([Token("Screen"), Token("device")], indexer)
targets_2 = ListField(
[TextField([Token("Screen")], indexer), TextField([Token("Device")], indexer)]
)
instance_1 = Instance({"tokens": tokens_1, "targets": targets_1})
instance_2 = Instance({"tokens": tokens_2, "targets": targets_2})
a_batch = Batch([instance_1, instance_2])
a_batch.index_instances(Vocabulary())
batch_tensor = a_batch.as_tensor_dict()
elmo_target_token_indices = batch_tensor["targets"]["elmo"]["elmo_tokens"]
# The TextField that is empty should have been created using the
# `get_empty_token_list` and then padded with zeros.
empty_target = elmo_target_token_indices[0][1].numpy()
np.testing.assert_array_equal(np.zeros((1, 50)), empty_target)
non_empty_targets = [
elmo_target_token_indices[0][0],
elmo_target_token_indices[1][0],
elmo_target_token_indices[1][1],
]
for non_empty_target in non_empty_targets:
with pytest.raises(AssertionError):
np.testing.assert_array_equal(np.zeros((1, 50)), non_empty_target)
| allennlp-master | tests/data/token_indexers/elmo_indexer_test.py |
from allennlp.common import cached_transformers
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import PretrainedTransformerMismatchedIndexer
class TestPretrainedTransformerMismatchedIndexer(AllenNlpTestCase):
def test_bert(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
indexer = PretrainedTransformerMismatchedIndexer("bert-base-cased")
text = ["AllenNLP", "is", "great"]
tokens = tokenizer.tokenize(" ".join(["[CLS]"] + text + ["[SEP]"]))
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices([Token(word) for word in text], vocab)
assert indexed["token_ids"] == expected_ids
assert indexed["mask"] == [True] * len(text)
# Hardcoding a few things because we know how BERT tokenization works
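        # "AllenNLP" is split into three wordpieces occupying positions 1-3 (position 0
        # is [CLS]), while "is" and "great" each map to a single wordpiece.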
assert indexed["offsets"] == [(1, 3), (4, 4), (5, 5)]
assert indexed["wordpiece_mask"] == [True] * len(expected_ids)
keys = indexed.keys()
assert indexer.get_empty_token_list() == {key: [] for key in keys}
max_length = 10
padding_lengths = {key: max_length for key in keys}
padded_tokens = indexer.as_padded_tensor_dict(indexed, padding_lengths)
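        # Check that each key was padded with a type-appropriate value: offsets with
        # (0, 0), mask-like keys with False, and everything else (the ids) with 0.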
for key in keys:
padding_length = max_length - len(indexed[key])
if key == "offsets":
padding = (0, 0)
elif "mask" in key:
padding = False
else:
padding = 0
expected_value = indexed[key] + ([padding] * padding_length)
assert len(padded_tokens[key]) == max_length
if key == "offsets":
expected_value = [list(t) for t in expected_value]
assert padded_tokens[key].tolist() == expected_value
def test_long_sequence_splitting(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-uncased")
indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased", max_length=4)
text = ["AllenNLP", "is", "great"]
tokens = tokenizer.tokenize(" ".join(["[CLS]"] + text + ["[SEP]"]))
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(expected_ids) == 7 # just to make sure it's what we're expecting
cls_id, sep_id = expected_ids[0], expected_ids[-1]
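        # With max_length=4 the 7 wordpieces are split into three segments of at most
        # 4 tokens each, with a [SEP]/[CLS] pair inserted at every segment boundary;
        # the expected ids are rebuilt manually here to match that layout.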
expected_ids = (
expected_ids[:3]
+ [sep_id, cls_id]
+ expected_ids[3:5]
+ [sep_id, cls_id]
+ expected_ids[5:]
)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices([Token(word) for word in text], vocab)
assert indexed["token_ids"] == expected_ids
        # [CLS] allen ##nl [SEP] [CLS] ##p is [SEP] [CLS] great [SEP]
assert indexed["segment_concat_mask"] == [True] * len(expected_ids)
# allennlp is great
assert indexed["mask"] == [True] * len(text)
        # [CLS] allen ##nl ##p is great [SEP]
assert indexed["wordpiece_mask"] == [True] * 7
| allennlp-master | tests/data/token_indexers/pretrained_transformer_mismatched_indexer_test.py |
from collections import defaultdict
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import TokenCharactersIndexer
from allennlp.data.tokenizers.character_tokenizer import CharacterTokenizer
class CharacterTokenIndexerTest(AllenNlpTestCase):
def test_count_vocab_items_respects_casing(self):
indexer = TokenCharactersIndexer("characters", min_padding_length=5)
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["characters"] == {"h": 1, "H": 1, "e": 2, "l": 4, "o": 2}
indexer = TokenCharactersIndexer(
"characters", CharacterTokenizer(lowercase_characters=True), min_padding_length=5
)
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["characters"] == {"h": 2, "e": 2, "l": 4, "o": 2}
def test_as_array_produces_token_sequence(self):
indexer = TokenCharactersIndexer("characters", min_padding_length=1)
padded_tokens = indexer.as_padded_tensor_dict(
{"token_characters": [[1, 2, 3, 4, 5], [1, 2, 3], [1]]},
padding_lengths={"token_characters": 4, "num_token_characters": 10},
)
assert padded_tokens["token_characters"].tolist() == [
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0],
[1, 2, 3, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
def test_tokens_to_indices_produces_correct_characters(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("A", namespace="characters")
vocab.add_token_to_namespace("s", namespace="characters")
vocab.add_token_to_namespace("e", namespace="characters")
vocab.add_token_to_namespace("n", namespace="characters")
vocab.add_token_to_namespace("t", namespace="characters")
vocab.add_token_to_namespace("c", namespace="characters")
indexer = TokenCharactersIndexer("characters", min_padding_length=1)
indices = indexer.tokens_to_indices([Token("sentential")], vocab)
assert indices == {"token_characters": [[3, 4, 5, 6, 4, 5, 6, 1, 1, 1]]}
def test_start_and_end_tokens(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("A", namespace="characters") # 2
vocab.add_token_to_namespace("s", namespace="characters") # 3
vocab.add_token_to_namespace("e", namespace="characters") # 4
vocab.add_token_to_namespace("n", namespace="characters") # 5
vocab.add_token_to_namespace("t", namespace="characters") # 6
vocab.add_token_to_namespace("c", namespace="characters") # 7
vocab.add_token_to_namespace("<", namespace="characters") # 8
vocab.add_token_to_namespace(">", namespace="characters") # 9
vocab.add_token_to_namespace("/", namespace="characters") # 10
indexer = TokenCharactersIndexer(
"characters", start_tokens=["<s>"], end_tokens=["</s>"], min_padding_length=1
)
indices = indexer.tokens_to_indices([Token("sentential")], vocab)
assert indices == {
"token_characters": [[8, 3, 9], [3, 4, 5, 6, 4, 5, 6, 1, 1, 1], [8, 10, 3, 9]]
}
def test_min_padding_length(self):
sentence = "AllenNLP is awesome ."
tokens = [Token(token) for token in sentence.split(" ")]
vocab = Vocabulary()
vocab.add_token_to_namespace("A", namespace="characters") # 2
vocab.add_token_to_namespace("l", namespace="characters") # 3
vocab.add_token_to_namespace("e", namespace="characters") # 4
vocab.add_token_to_namespace("n", namespace="characters") # 5
vocab.add_token_to_namespace("N", namespace="characters") # 6
vocab.add_token_to_namespace("L", namespace="characters") # 7
vocab.add_token_to_namespace("P", namespace="characters") # 8
vocab.add_token_to_namespace("i", namespace="characters") # 9
vocab.add_token_to_namespace("s", namespace="characters") # 10
vocab.add_token_to_namespace("a", namespace="characters") # 11
vocab.add_token_to_namespace("w", namespace="characters") # 12
vocab.add_token_to_namespace("o", namespace="characters") # 13
vocab.add_token_to_namespace("m", namespace="characters") # 14
vocab.add_token_to_namespace(".", namespace="characters") # 15
indexer = TokenCharactersIndexer("characters", min_padding_length=10)
indices = indexer.tokens_to_indices(tokens, vocab)
padded = indexer.as_padded_tensor_dict(indices, indexer.get_padding_lengths(indices))
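        # min_padding_length=10 forces the character dimension to at least 10, even
        # though the longest token here ("AllenNLP") is only 8 characters long.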
assert padded["token_characters"].tolist() == [
[2, 3, 3, 4, 5, 6, 7, 8, 0, 0],
[9, 10, 0, 0, 0, 0, 0, 0, 0, 0],
[11, 12, 4, 10, 13, 14, 4, 0, 0, 0],
[15, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
def test_warn_min_padding_length(self):
with pytest.warns(
UserWarning, match=r"using the default value \(0\) of `min_padding_length`"
):
TokenCharactersIndexer("characters")
| allennlp-master | tests/data/token_indexers/character_token_indexer_test.py |
from collections import defaultdict
from dataclasses import dataclass
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import SpacyTokenizer
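# `Token` itself is a slotted dataclass, so this subclass also declares its extra
# attribute in `__slots__` and routes construction through `Token.__init__`.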
@dataclass(init=False)
class TokenWithStyle(Token):
__slots__ = ["is_bold"]
is_bold: bool
def __init__(self, text: str = None, is_bold: bool = False):
super().__init__(text=text)
self.is_bold = is_bold
class TestSingleIdTokenIndexer(AllenNlpTestCase):
def test_count_vocab_items_respects_casing(self):
indexer = SingleIdTokenIndexer("words")
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["words"] == {"hello": 1, "Hello": 1}
indexer = SingleIdTokenIndexer("words", lowercase_tokens=True)
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["words"] == {"hello": 2}
def test_as_array_produces_token_sequence(self):
indexer = SingleIdTokenIndexer("words")
padded_tokens = indexer.as_padded_tensor_dict({"tokens": [1, 2, 3, 4, 5]}, {"tokens": 10})
assert padded_tokens["tokens"].tolist() == [1, 2, 3, 4, 5, 0, 0, 0, 0, 0]
def test_count_other_features(self):
indexer = SingleIdTokenIndexer("other_features", feature_name="is_bold")
counter = defaultdict(lambda: defaultdict(int))
token = TokenWithStyle("Header")
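        # The indexer counts the raw feature value, so the test stores the string
        # "True" (rather than a bool) to match the expected counter key below.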
token.is_bold = "True"
indexer.count_vocab_items(token, counter)
assert counter["other_features"] == {"True": 1}
def test_count_vocab_items_with_non_default_feature_name(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [Token("<S>")] + [t for t in tokens] + [Token("</S>")]
indexer = SingleIdTokenIndexer(
namespace="dep_labels", feature_name="dep_", default_value="NONE"
)
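        # With feature_name="dep_" the indexer counts each token's dependency label;
        # the added <S> and </S> markers carry no parse information, so they fall back
        # to the default_value and account for the two "NONE" counts below.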
counter = defaultdict(lambda: defaultdict(int))
for token in tokens:
indexer.count_vocab_items(token, counter)
assert counter["dep_labels"] == {
"ROOT": 1,
"nsubj": 1,
"det": 1,
"NONE": 2,
"attr": 1,
"punct": 1,
}
def test_tokens_to_indices_with_non_default_feature_name(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [t for t in tokens] + [Token("</S>")]
vocab = Vocabulary()
root_index = vocab.add_token_to_namespace("ROOT", namespace="dep_labels")
none_index = vocab.add_token_to_namespace("NONE", namespace="dep_labels")
indexer = SingleIdTokenIndexer(
namespace="dep_labels", feature_name="dep_", default_value="NONE"
)
assert indexer.tokens_to_indices([tokens[1]], vocab) == {"tokens": [root_index]}
assert indexer.tokens_to_indices([tokens[-1]], vocab) == {"tokens": [none_index]}
def test_crashes_with_empty_feature_value_and_no_default(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [t for t in tokens] + [Token("</S>")]
vocab = Vocabulary()
vocab.add_token_to_namespace("ROOT", namespace="dep_labels")
vocab.add_token_to_namespace("NONE", namespace="dep_labels")
indexer = SingleIdTokenIndexer(namespace="dep_labels", feature_name="dep_")
with pytest.raises(ValueError):
indexer.tokens_to_indices([tokens[-1]], vocab)
def test_no_namespace_means_no_counting(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [Token("<S>")] + [t for t in tokens] + [Token("</S>")]
indexer = SingleIdTokenIndexer(namespace=None, feature_name="text_id")
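        # With namespace=None the indexer should never touch the counter at all; the
        # defaultdict below fails the test if any namespace entry is ever created.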
def fail():
assert False
counter = defaultdict(fail)
for token in tokens:
indexer.count_vocab_items(token, counter)
def test_no_namespace_means_no_indexing(self):
indexer = SingleIdTokenIndexer(namespace=None, feature_name="text_id")
assert indexer.tokens_to_indices([Token(text_id=23)], None) == {"tokens": [23]}
| allennlp-master | tests/data/token_indexers/single_id_token_indexer_test.py |
import pytest
from allennlp.common import cached_transformers
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerIndexer(AllenNlpTestCase):
def test_as_array_produces_token_sequence_bert_uncased(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-uncased")
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased")
string_specials = "[CLS] AllenNLP is great [SEP]"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
# tokens tokenized with our pretrained tokenizer have indices in them
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_bert_cased(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
indexer = PretrainedTransformerIndexer(model_name="bert-base-cased")
string_specials = "[CLS] AllenNLP is great [SEP]"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
# tokens tokenized with our pretrained tokenizer have indices in them
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_bert_cased_sentence_pair(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
allennlp_tokenizer = PretrainedTransformerTokenizer(
"bert-base-cased", add_special_tokens=False
)
indexer = PretrainedTransformerIndexer(model_name="bert-base-cased")
default_format = "[CLS] AllenNLP is great! [SEP] Really it is! [SEP]"
tokens = tokenizer.tokenize(default_format)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
allennlp_tokens = allennlp_tokenizer.add_special_tokens(
allennlp_tokenizer.tokenize("AllenNLP is great!"),
allennlp_tokenizer.tokenize("Really it is!"),
)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_roberta(self):
tokenizer = cached_transformers.get_tokenizer("roberta-base")
allennlp_tokenizer = PretrainedTransformerTokenizer("roberta-base")
indexer = PretrainedTransformerIndexer(model_name="roberta-base")
string_specials = "<s>AllenNLP is great</s>"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
# tokens tokenized with our pretrained tokenizer have indices in them
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_roberta_sentence_pair(self):
tokenizer = cached_transformers.get_tokenizer("roberta-base")
allennlp_tokenizer = PretrainedTransformerTokenizer(
"roberta-base", add_special_tokens=False
)
indexer = PretrainedTransformerIndexer(model_name="roberta-base")
default_format = "<s>AllenNLP is great!</s></s>Really it is!</s>"
tokens = tokenizer.tokenize(default_format)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
allennlp_tokens = allennlp_tokenizer.add_special_tokens(
allennlp_tokenizer.tokenize("AllenNLP is great!"),
allennlp_tokenizer.tokenize("Really it is!"),
)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids, f"{allennlp_tokens}\n{tokens}"
@pytest.mark.parametrize("model_name", ["roberta-base", "bert-base-cased", "xlm-mlm-ende-1024"])
def test_transformers_vocab_sizes(self, model_name):
namespace = "tags"
tokenizer = cached_transformers.get_tokenizer(model_name)
allennlp_tokenizer = PretrainedTransformerTokenizer(model_name)
indexer = PretrainedTransformerIndexer(model_name=model_name, namespace=namespace)
allennlp_tokens = allennlp_tokenizer.tokenize("AllenNLP is great!")
vocab = Vocabulary()
# here we copy entire transformers vocab
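        # (calling tokens_to_indices is what triggers the copy of the transformer's
        # vocabulary into the "tags" namespace, which the size check below relies on)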
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
del indexed
assert vocab.get_vocab_size(namespace=namespace) == tokenizer.vocab_size
def test_transformers_vocabs_added_correctly(self):
namespace, model_name = "tags", "roberta-base"
tokenizer = cached_transformers.get_tokenizer(model_name, use_fast=False)
allennlp_tokenizer = PretrainedTransformerTokenizer(model_name)
indexer = PretrainedTransformerIndexer(model_name=model_name, namespace=namespace)
allennlp_tokens = allennlp_tokenizer.tokenize("AllenNLP is great!")
vocab = Vocabulary()
# here we copy entire transformers vocab
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
del indexed
assert vocab.get_token_to_index_vocabulary(namespace=namespace) == tokenizer.encoder
def test_mask(self):
# We try these models, because
# - BERT pads tokens with 0
# - RoBERTa pads tokens with 1
# - GPT2 has no padding token, so we choose 0
for model in ["bert-base-uncased", "roberta-base", "gpt2"]:
allennlp_tokenizer = PretrainedTransformerTokenizer(model)
indexer = PretrainedTransformerIndexer(model_name=model)
string_no_specials = "AllenNLP is great"
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
expected_masks = [True] * len(indexed["token_ids"])
assert indexed["mask"] == expected_masks
max_length = 10
padding_lengths = {key: max_length for key in indexed.keys()}
padded_tokens = indexer.as_padded_tensor_dict(indexed, padding_lengths)
padding_length = max_length - len(indexed["mask"])
expected_masks = expected_masks + ([False] * padding_length)
assert len(padded_tokens["mask"]) == max_length
assert padded_tokens["mask"].tolist() == expected_masks
assert len(padded_tokens["token_ids"]) == max_length
pad_token_id = allennlp_tokenizer.tokenizer.pad_token_id
if pad_token_id is None:
pad_token_id = 0
padding_suffix = [pad_token_id] * padding_length
assert padded_tokens["token_ids"][-padding_length:].tolist() == padding_suffix
def test_long_sequence_splitting(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-uncased")
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased", max_length=4)
string_specials = "[CLS] AllenNLP is great [SEP]"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(expected_ids) == 7 # just to make sure it's what we're expecting
cls_id, sep_id = expected_ids[0], expected_ids[-1]
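        # max_length=4 splits the 7 wordpieces into three segments, inserting a
        # [SEP]/[CLS] pair at each internal boundary; the expected ids are rebuilt
        # manually here to reflect that.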
expected_ids = (
expected_ids[:3]
+ [sep_id, cls_id]
+ expected_ids[3:5]
+ [sep_id, cls_id]
+ expected_ids[5:]
)
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
assert indexed["segment_concat_mask"] == [True] * len(expected_ids)
assert indexed["mask"] == [True] * 7 # original length
@staticmethod
def _assert_tokens_equal(expected_tokens, actual_tokens):
for expected, actual in zip(expected_tokens, actual_tokens):
assert expected.text == actual.text
assert expected.text_id == actual.text_id
assert expected.type_id == actual.type_id
def test_indices_to_tokens(self):
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
indexer_max_length = PretrainedTransformerIndexer(
model_name="bert-base-uncased", max_length=4
)
indexer_no_max_length = PretrainedTransformerIndexer(model_name="bert-base-uncased")
string_no_specials = "AllenNLP is great"
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer_no_max_length.tokens_to_indices(allennlp_tokens, vocab)
tokens_from_indices = indexer_no_max_length.indices_to_tokens(indexed, vocab)
self._assert_tokens_equal(allennlp_tokens, tokens_from_indices)
indexed = indexer_max_length.tokens_to_indices(allennlp_tokens, vocab)
tokens_from_indices = indexer_max_length.indices_to_tokens(indexed, vocab)
# For now we are not removing special tokens introduced from max_length
sep_cls = [allennlp_tokens[-1], allennlp_tokens[0]]
expected = (
allennlp_tokens[:3] + sep_cls + allennlp_tokens[3:5] + sep_cls + allennlp_tokens[5:]
)
self._assert_tokens_equal(expected, tokens_from_indices)
| allennlp-master | tests/data/token_indexers/pretrained_transformer_indexer_test.py |