repo_id | file_path | content | __index_level_0__
---|---|---|---|
coqui_public_repos/TTS/TTS/tts/layers | coqui_public_repos/TTS/TTS/tts/layers/xtts/perceiver_encoder.py | # Adapted from https://github.com/lucidrains/naturalspeech2-pytorch/blob/659bec7f7543e7747e809e950cc2f84242fbeec7/naturalspeech2_pytorch/naturalspeech2_pytorch.py#L532
from collections import namedtuple
from functools import wraps
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from packaging import version
from torch import einsum, nn
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(self, dropout=0.0, causal=False, use_flash=False):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash = use_flash
assert not (
use_flash and version.parse(torch.__version__) < version.parse("2.0.0")
), "in order to use flash attention, you must be using pytorch 2.0 or above"
# determine efficient attention configs for cuda and cpu
self.config = namedtuple("EfficientAttentionConfig", ["enable_flash", "enable_math", "enable_mem_efficient"])
self.cpu_config = self.config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash:
return
device_properties = torch.cuda.get_device_properties(torch.device("cuda"))
if device_properties.major == 8 and device_properties.minor == 0:
print_once("A100 GPU detected, using flash attention if input tensor is on cuda")
self.cuda_config = self.config(True, False, False)
else:
print_once("Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda")
self.cuda_config = self.config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask=None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, "b ... -> b 1 ...").expand_as(q)
if v.ndim == 3:
v = rearrange(v, "b ... -> b 1 ...").expand_as(q)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, "b j -> b 1 1 j")
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v, attn_mask=mask, dropout_p=self.dropout if self.training else 0.0, is_causal=self.causal
)
return out
def forward(self, q, k, v, mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash:
return self.flash_attn(q, k, v, mask=mask)
kv_einsum_eq = "b j d" if k.ndim == 3 else "b h j d"
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, "b j -> b 1 1 j")
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
class RMSNorm(nn.Module):
def __init__(self, dim, scale=True, dim_cond=None):
super().__init__()
self.cond = exists(dim_cond)
self.to_gamma_beta = nn.Linear(dim_cond, dim * 2) if self.cond else None
self.scale = dim**0.5
self.gamma = nn.Parameter(torch.ones(dim)) if scale else None
def forward(self, x, cond=None):
gamma = default(self.gamma, 1)
out = F.normalize(x, dim=-1) * self.scale * gamma
if not self.cond:
return out
assert exists(cond)
gamma, beta = self.to_gamma_beta(cond).chunk(2, dim=-1)
gamma, beta = map(lambda t: rearrange(t, "b d -> b 1 d"), (gamma, beta))
return out * gamma + beta
class CausalConv1d(nn.Conv1d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
(kernel_size,) = self.kernel_size
(dilation,) = self.dilation
(stride,) = self.stride
assert stride == 1
self.causal_padding = dilation * (kernel_size - 1)
def forward(self, x):
causal_padded_x = F.pad(x, (self.causal_padding, 0), value=0.0)
return super().forward(causal_padded_x)
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.gelu(gate) * x
def FeedForward(dim, mult=4, causal_conv=False):
dim_inner = int(dim * mult * 2 / 3)
conv = None
if causal_conv:
conv = nn.Sequential(
Rearrange("b n d -> b d n"),
CausalConv1d(dim_inner, dim_inner, 3),
Rearrange("b d n -> b n d"),
)
return Sequential(nn.Linear(dim, dim_inner * 2), GEGLU(), conv, nn.Linear(dim_inner, dim))
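# Example (illustrative, not part of the original file): FeedForward(dim=512, mult=4)
# maps (b, n, 512) back to (b, n, 512). The GEGLU inner width is
# int(512 * 4 * 2 / 3) = 1365, the usual 2/3 scaling that keeps the parameter
# count close to a plain 4x MLP.
#   ff = FeedForward(dim=512)
#   out = ff(torch.randn(2, 10, 512))  # -> torch.Size([2, 10, 512])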
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth=2,
dim_context=None,
num_latents=32,
dim_head=64,
heads=8,
ff_mult=4,
use_flash_attn=False,
):
super().__init__()
dim_context = default(dim_context, dim)
self.proj_context = nn.Linear(dim_context, dim) if dim_context != dim else nn.Identity()
self.latents = nn.Parameter(torch.randn(num_latents, dim))
nn.init.normal_(self.latents, std=0.02)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
Attention(
dim=dim,
dim_head=dim_head,
heads=heads,
use_flash=use_flash_attn,
cross_attn_include_queries=True,
),
FeedForward(dim=dim, mult=ff_mult),
]
)
)
self.norm = RMSNorm(dim)
def forward(self, x, mask=None):
batch = x.shape[0]
x = self.proj_context(x)
latents = repeat(self.latents, "n d -> b n d", b=batch)
for attn, ff in self.layers:
latents = attn(latents, x, mask=mask) + latents
latents = ff(latents) + latents
return self.norm(latents)
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_context=None,
causal=False,
dim_head=64,
heads=8,
dropout=0.0,
use_flash=False,
cross_attn_include_queries=False,
):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
self.cross_attn_include_queries = cross_attn_include_queries
dim_inner = dim_head * heads
dim_context = default(dim_context, dim)
self.attend = Attend(causal=causal, dropout=dropout, use_flash=use_flash)
self.to_q = nn.Linear(dim, dim_inner, bias=False)
self.to_kv = nn.Linear(dim_context, dim_inner * 2, bias=False)
self.to_out = nn.Linear(dim_inner, dim, bias=False)
def forward(self, x, context=None, mask=None):
h, has_context = self.heads, exists(context)
context = default(context, x)
if has_context and self.cross_attn_include_queries:
context = torch.cat((x, context), dim=-2)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim=-1))
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))
out = self.attend(q, k, v, mask=mask)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
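# Minimal usage sketch (shapes are illustrative, not part of the original file):
#   resampler = PerceiverResampler(dim=512, depth=2, num_latents=32)
#   context = torch.randn(2, 120, 512)  # (batch, seq_len, dim)
#   latents = resampler(context)        # -> torch.Size([2, 32, 512])
# The resampler compresses a variable-length context into a fixed number of
# latents via cross-attention, so downstream modules see a constant-size input.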
| 0 |
coqui_public_repos/STT-models/czech/comodoro | coqui_public_repos/STT-models/czech/comodoro/v0.3.0/LICENSE | https://creativecommons.org/licenses/by-nc/4.0/legalcode
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstproject-main.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Projects a transduction onto its input or output language.
#include <cstring>
#include <memory>
#include <string>
#include <fst/flags.h>
#include <fst/script/getters.h>
#include <fst/script/project.h>
DECLARE_bool(project_output);
int fstproject_main(int argc, char **argv) {
namespace s = fst::script;
using fst::script::MutableFstClass;
string usage =
"Projects a transduction onto its input"
" or output language.\n\n Usage: ";
usage += argv[0];
usage += " [in.fst [out.fst]]\n";
std::set_new_handler(FailedNewHandler);
SET_FLAGS(usage.c_str(), &argc, &argv, true);
if (argc > 3) {
ShowUsage();
return 1;
}
const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : "";
const string out_name = argc > 2 ? argv[2] : "";
std::unique_ptr<MutableFstClass> fst(MutableFstClass::Read(in_name, true));
if (!fst) return 1;
s::Project(fst.get(), s::GetProjectType(FLAGS_project_output));
return !fst->Write(out_name);
}
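// Example usage (file names are hypothetical):
//   fstproject in.fst out.fst                    (project onto input labels)
//   fstproject --project_output in.fst out.fst   (project onto output labels)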
| 0 |
coqui_public_repos/STT/training/coqui_stt_training | coqui_public_repos/STT/training/coqui_stt_training/util/config.py | from __future__ import absolute_import, division, print_function
import json
import os
import sys
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import List
import progressbar
from attrdict import AttrDict
from coqpit import MISSING, Coqpit, check_argument
from coqui_stt_ctcdecoder import Alphabet, UTF8Alphabet
from .augmentations import NormalizeSampleRate, parse_augmentations
from .auto_input import create_alphabet_from_sources, create_datasets_from_auto_input
from .gpu import get_available_gpus
from .helpers import parse_file_size
from .io import is_remote_path, open_remote, path_exists_remote
class _ConfigSingleton:
_config = None
def __getattr__(self, name):
if _ConfigSingleton._config is None:
raise RuntimeError("Global configuration not yet initialized.")
if not hasattr(_ConfigSingleton._config, name):
raise RuntimeError(
"Configuration option {} not found in config.".format(name)
)
return getattr(_ConfigSingleton._config, name)
Config = _ConfigSingleton() # pylint: disable=invalid-name
@dataclass
class BaseSttConfig(Coqpit):
def __post_init__(self):
import tensorflow.compat.v1 as tfv1
# Augmentations
self.augmentations = parse_augmentations(self.augment)
if self.augmentations:
print(f"Parsed augmentations: {self.augmentations}", file=sys.stderr)
if self.augmentations and self.feature_cache and self.cache_for_epochs == 0:
print(
"Due to your feature-cache settings, augmentations of "
"the first epoch will be repeated on all following epochs. "
"This may lead to unintended over-fitting. "
"You can use --cache_for_epochs <n_epochs> to invalidate "
"the cache after a given number of epochs.",
file=sys.stderr,
)
if self.normalize_sample_rate:
self.augmentations = [NormalizeSampleRate(self.audio_sample_rate)] + self[
"augmentations"
]
# Caching
if self.cache_for_epochs == 1:
print(
"--cache_for_epochs == 1 is (re-)creating the feature cache "
"on every epoch but will never use it. You can either set "
"--cache_for_epochs > 1, or not use feature caching at all.",
file=sys.stderr,
)
# Read-buffer
self.read_buffer = parse_file_size(self.read_buffer)
# Set default dropout rates
if self.dropout_rate2 < 0:
self.dropout_rate2 = self.dropout_rate
if self.dropout_rate3 < 0:
self.dropout_rate3 = self.dropout_rate
if self.dropout_rate6 < 0:
self.dropout_rate6 = self.dropout_rate
# Checkpoint dir logic
if self.checkpoint_dir:
# checkpoint_dir always overrides {save,load}_checkpoint_dir
self.save_checkpoint_dir = self.checkpoint_dir
self.load_checkpoint_dir = self.checkpoint_dir
if self.load_train not in ["last", "best", "init", "auto"]:
self.load_train = "auto"
if self.load_evaluate not in ["last", "best", "auto"]:
self.load_evaluate = "auto"
# Set default summary dir
if not self.summary_dir:
self.summary_dir = os.path.join(self.save_checkpoint_dir, "summaries")
# Standard session configuration that'll be used for all new sessions.
self.session_config = tfv1.ConfigProto(
allow_soft_placement=True,
log_device_placement=self.log_placement,
inter_op_parallelism_threads=self.inter_op_parallelism_threads,
intra_op_parallelism_threads=self.intra_op_parallelism_threads,
gpu_options=tfv1.GPUOptions(allow_growth=self.use_allow_growth),
)
# CPU device
self.cpu_device = "/cpu:0"
# Available GPU devices
self.available_devices = get_available_gpus(self.session_config)
# If there is no GPU available, we fall back to CPU based operation
if not self.available_devices:
self.available_devices = [self.cpu_device]
# If neither `--alphabet_config_path` nor `--bytes_output_mode` were specified,
# look for alphabet file alongside loaded checkpoint.
loaded_checkpoint_alphabet_file = os.path.join(
self.load_checkpoint_dir, "alphabet.txt"
)
saved_checkpoint_alphabet_file = os.path.join(
self.save_checkpoint_dir, "alphabet.txt"
)
        if self.auto_input_dataset and (
            self.train_files or self.dev_files or self.test_files
        ):
            raise RuntimeError(
                "When using --auto_input_dataset, do not specify --train_files, "
                "--dev_files, or --test_files."
            )
if self.auto_input_dataset:
(
gen_train,
gen_dev,
gen_test,
gen_alphabet,
) = create_datasets_from_auto_input(
Path(self.auto_input_dataset),
Path(self.alphabet_config_path) if self.alphabet_config_path else None,
)
self.train_files = [str(gen_train)]
self.dev_files = [str(gen_dev)]
self.test_files = [str(gen_test)]
self.alphabet_config_path = str(gen_alphabet)
if self.bytes_output_mode and self.alphabet_config_path:
raise RuntimeError(
"You cannot set --alphabet_config_path *and* --bytes_output_mode"
)
elif self.bytes_output_mode:
self.alphabet = UTF8Alphabet()
elif self.alphabet_config_path:
self.alphabet = Alphabet(self.alphabet_config_path)
self.effective_alphabet_path = self.alphabet_config_path
elif os.path.exists(loaded_checkpoint_alphabet_file):
print(
"I --alphabet_config_path not specified, but found an alphabet file "
f"alongside specified checkpoint ({loaded_checkpoint_alphabet_file}). "
"Will use this alphabet file for this run.",
file=sys.stderr,
)
self.alphabet = Alphabet(loaded_checkpoint_alphabet_file)
self.effective_alphabet_path = loaded_checkpoint_alphabet_file
elif self.train_files and self.dev_files and self.test_files:
# If all subsets are in the same folder and there's an alphabet file
# alongside them, use it.
self.alphabet = None
sources = self.train_files + self.dev_files + self.test_files
parents = set(Path(p).parent for p in sources)
if len(parents) == 1:
possible_alphabet = list(parents)[0] / "alphabet.txt"
if possible_alphabet.exists():
print(
"I --alphabet_config_path not specified, but all input "
"datasets are present and in the same folder (--train_files, "
"--dev_files and --test_files), and an alphabet.txt file "
f"was found alongside the sets ({possible_alphabet}). "
"Will use this alphabet file for this run.",
file=sys.stderr,
)
self.alphabet = Alphabet(str(possible_alphabet))
self.effective_alphabet_path = possible_alphabet
if not self.alphabet:
# Generate alphabet automatically from input dataset, but only if
# fully specified, to avoid confusion in case a missing set has extra
# characters.
print(
"I --alphabet_config_path not specified, but all input datasets are "
"present (--train_files, --dev_files, --test_files). An alphabet "
"will be generated automatically from the data and placed alongside "
f"the checkpoint ({saved_checkpoint_alphabet_file}).",
file=sys.stderr,
)
characters, alphabet = create_alphabet_from_sources(sources)
print(
f"I Generated alphabet characters: {characters}.",
file=sys.stderr,
)
self.alphabet = alphabet
self.effective_alphabet_path = saved_checkpoint_alphabet_file
else:
if not os.path.isdir(self.load_checkpoint_dir):
raise RuntimeError(
"Missing checkpoint directory (--checkpoint_dir or --load_checkpoint_dir)"
)
raise RuntimeError(
"Missing --alphabet_config_path flag. Couldn't find an alphabet file "
"alongside checkpoint, and input datasets are not fully specified "
"(--train_files, --dev_files, --test_files), so can't generate an alphabet. "
"Either specify an alphabet file or fully specify the dataset, so one will "
"be generated automatically."
)
if not self.save_checkpoint_dir:
raise RuntimeError(
"Missing checkpoint directory (--checkpoint_dir or --save_checkpoint_dir)"
)
# Save flags next to checkpoints
if not is_remote_path(self.save_checkpoint_dir):
os.makedirs(self.save_checkpoint_dir, exist_ok=True)
flags_file = os.path.join(self.save_checkpoint_dir, "flags.txt")
if not os.path.exists(flags_file):
with open_remote(flags_file, "w") as fout:
json.dump(self.serialize(), fout, indent=2)
# Serialize alphabet alongside checkpoint
if not os.path.exists(saved_checkpoint_alphabet_file):
with open_remote(saved_checkpoint_alphabet_file, "wb") as fout:
fout.write(self.alphabet.SerializeText())
# If we have an existing checkpoint with a flags file, load its n_hidden value
prev_flags_file = os.path.join(self.load_checkpoint_dir, "flags.txt")
self.prev_n_hidden = None
if os.path.exists(prev_flags_file):
try:
with open(prev_flags_file) as fin:
parsed = json.load(fin)
prev_n_hidden = parsed["n_hidden"]
if prev_n_hidden != self.n_hidden:
print(
f"W WARNING: --n_hidden value ({self.n_hidden}) is different "
f"from value found in checkpoint ({prev_n_hidden})."
)
print(
"W WARNING: This would result in an error when loading the "
"checkpoint, so n_hidden has been overriden with the "
"checkpoint value."
)
self.n_hidden = prev_n_hidden
except json.JSONDecodeError:
# File exists but is not JSON (older checkpoint), ignore error
pass
# Geometric Constants
# ===================
# For an explanation of the meaning of the geometric constants
# please refer to doc/Geometry.md
# Number of MFCC features
self.n_input = 26 # TODO: Determine this programmatically from the sample rate
# The number of frames in the context
self.n_context = (
9 # TODO: Determine the optimal value using a validation data set
)
# Number of units in hidden layers
self.n_hidden_1 = self.n_hidden
self.n_hidden_2 = self.n_hidden
self.n_hidden_5 = self.n_hidden
# LSTM cell state dimension
self.n_cell_dim = self.n_hidden
# The number of units in the third layer, which feeds in to the LSTM
self.n_hidden_3 = self.n_cell_dim
# Dims in last layer = number of characters in alphabet plus one
# +1 for CTC blank label
self.n_hidden_6 = self.alphabet.GetSize() + 1
# Size of audio window in samples
if (self.feature_win_len * self.audio_sample_rate) % 1000 != 0:
raise RuntimeError(
"--feature_win_len value ({}) in milliseconds ({}) multiplied "
"by --audio_sample_rate value ({}) must be an integer value. Adjust "
"your --feature_win_len value or resample your audio accordingly."
"".format(
self.feature_win_len,
self.feature_win_len / 1000,
self.audio_sample_rate,
)
)
self.audio_window_samples = self.audio_sample_rate * (
self.feature_win_len / 1000
)
# Stride for feature computations in samples
if (self.feature_win_step * self.audio_sample_rate) % 1000 != 0:
raise RuntimeError(
"--feature_win_step value ({}) in milliseconds ({}) multiplied "
"by --audio_sample_rate value ({}) must be an integer value. Adjust "
"your --feature_win_step value or resample your audio accordingly."
"".format(
self.feature_win_step,
self.feature_win_step / 1000,
self.audio_sample_rate,
)
)
self.audio_step_samples = self.audio_sample_rate * (
self.feature_win_step / 1000
)
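        # Worked example with the defaults: --feature_win_len 32 at
        # --audio_sample_rate 16000 gives 16000 * (32 / 1000) = 512 window
        # samples, and --feature_win_step 20 gives 16000 * (20 / 1000) = 320
        # step samples.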
if self.one_shot_infer and not path_exists_remote(self.one_shot_infer):
raise RuntimeError(
"Path specified in --one_shot_infer is not a valid file."
)
if self.train_cudnn and self.load_cudnn:
raise RuntimeError(
"Trying to use --train_cudnn, but --load_cudnn "
"was also specified. The --load_cudnn flag is only "
"needed when converting a CuDNN RNN checkpoint to "
"a CPU-capable graph. If your system is capable of "
"using CuDNN RNN, you can just specify the CuDNN RNN "
"checkpoint normally with --save_checkpoint_dir."
)
# sphinx-doc: training_ref_flags_start
train_files: List[str] = field(
default_factory=list,
metadata=dict(
help="space-separated list of files specifying the datasets used for training. Multiple files will get merged. If empty, training will not be run."
),
)
dev_files: List[str] = field(
default_factory=list,
metadata=dict(
help="space-separated list of files specifying the datasets used for validation. Multiple files will get reported separately. If empty, validation will not be run."
),
)
test_files: List[str] = field(
default_factory=list,
metadata=dict(
help="space-separated list of files specifying the datasets used for testing. Multiple files will get reported separately. If empty, the model will not be tested."
),
)
metrics_files: List[str] = field(
default_factory=list,
metadata=dict(
help="space-separated list of files specifying the datasets used for tracking of metrics (after validation step). Currently the only metric is the CTC loss but without affecting the tracking of best validation loss. Multiple files will get reported separately. If empty, metrics will not be computed."
),
)
auto_input_dataset: str = field(
default="",
metadata=dict(
help="path to a single CSV file to use for training. Cannot be specified alongside --train_files, --dev_files, --test_files. Training/validation/testing subsets will be automatically generated from the input, alongside with an alphabet file, if not already present.",
),
)
vocab_file: str = field(
default="",
metadata=dict(
help="For use with evaluate_flashlight - text file containing vocabulary of scorer, one word per line."
),
)
read_buffer: str = field(
default="1MB",
metadata=dict(
help="buffer-size for reading samples from datasets (supports file-size suffixes KB, MB, GB, TB)"
),
)
feature_cache: str = field(
default="",
metadata=dict(
help="cache MFCC features to disk to speed up future training runs on the same data. This flag specifies the path where cached features extracted from --train_files will be saved. If empty, or if online augmentation flags are enabled, caching will be disabled."
),
)
cache_for_epochs: int = field(
default=0,
metadata=dict(
help='after how many epochs the feature cache is invalidated again - 0 for "never"'
),
)
shuffle_batches: bool = field(
default=False,
metadata=dict(
help="reshuffle batches every epoch, starting after N epochs, where N is set by the shuffle_start flag."
),
)
shuffle_start: int = field(
default=1,
metadata=dict(help="epoch to start shuffling batches from (zero-based)."),
)
shuffle_buffer: int = field(
default=1000,
metadata=dict(
help="how many batches to keep in shuffle buffer when shuffling batches."
),
)
feature_win_len: int = field(
default=32,
metadata=dict(help="feature extraction audio window length in milliseconds"),
)
feature_win_step: int = field(
default=20,
metadata=dict(help="feature extraction window step length in milliseconds"),
)
audio_sample_rate: int = field(
default=16000, metadata=dict(help="sample rate value expected by model")
)
normalize_sample_rate: bool = field(
default=True,
metadata=dict(
help="normalize sample rate of all train_files to --audio_sample_rate"
),
)
# Data Augmentation
augment: List[str] = field(
default=None,
metadata=dict(
help='space-separated list of augmentations for training samples. Format is "--augment operation1[param1=value1, ...] operation2[param1=value1, ...] ..."'
),
)
# Global Constants
epochs: int = field(
default=75,
metadata=dict(
help="how many epochs (complete runs through the train files) to train for"
),
)
dropout_rate: float = field(
default=0.05, metadata=dict(help="dropout rate for feedforward layers")
)
dropout_rate2: float = field(
default=-1.0,
metadata=dict(help="dropout rate for layer 2 - defaults to dropout_rate"),
)
dropout_rate3: float = field(
default=-1.0,
metadata=dict(help="dropout rate for layer 3 - defaults to dropout_rate"),
)
dropout_rate4: float = field(
default=0.0, metadata=dict(help="dropout rate for layer 4 - defaults to 0.0")
)
dropout_rate5: float = field(
default=0.0, metadata=dict(help="dropout rate for layer 5 - defaults to 0.0")
)
dropout_rate6: float = field(
default=-1.0,
metadata=dict(help="dropout rate for layer 6 - defaults to dropout_rate"),
)
relu_clip: float = field(
default=20.0, metadata=dict(help="ReLU clipping value for non-recurrent layers")
)
# Adam optimizer(http://arxiv.org/abs/1412.6980) parameters
beta1: float = field(
default=0.9, metadata=dict(help="beta 1 parameter of Adam optimizer")
)
beta2: float = field(
default=0.999, metadata=dict(help="beta 2 parameter of Adam optimizer")
)
epsilon: float = field(
default=1e-8, metadata=dict(help="epsilon parameter of Adam optimizer")
)
learning_rate: float = field(
default=0.001, metadata=dict(help="learning rate of Adam optimizer")
)
# Batch sizes
train_batch_size: int = field(
default=1, metadata=dict(help="number of elements in a training batch")
)
dev_batch_size: int = field(
default=1, metadata=dict(help="number of elements in a validation batch")
)
test_batch_size: int = field(
default=1, metadata=dict(help="number of elements in a test batch")
)
export_batch_size: int = field(
default=1,
metadata=dict(help="number of elements per batch on the exported graph"),
)
# Memory test
skip_batch_test: bool = field(
default=False,
metadata=dict(help="skip batch size memory test before training"),
)
# Performance
inter_op_parallelism_threads: int = field(
default=0,
metadata=dict(
help="number of inter-op parallelism threads - see tf.ConfigProto for more details. USE OF THIS FLAG IS UNSUPPORTED"
),
)
intra_op_parallelism_threads: int = field(
default=0,
metadata=dict(
help="number of intra-op parallelism threads - see tf.ConfigProto for more details. USE OF THIS FLAG IS UNSUPPORTED"
),
)
use_allow_growth: bool = field(
default=False,
metadata=dict(
help="use Allow Growth flag which will allocate only required amount of GPU memory and prevent full allocation of available GPU memory"
),
)
load_cudnn: bool = field(
default=False,
metadata=dict(
help="Specifying this flag allows one to convert a CuDNN RNN checkpoint to a checkpoint capable of running on a CPU graph."
),
)
train_cudnn: bool = field(
default=False,
metadata=dict(
help="use CuDNN RNN backend for training on GPU. Note that checkpoints created with this flag can only be used with CuDNN RNN, i.e. fine tuning on a CPU device will not work"
),
)
automatic_mixed_precision: bool = field(
default=False,
metadata=dict(
help="whether to allow automatic mixed precision training. USE OF THIS FLAG IS UNSUPPORTED. Checkpoints created with automatic mixed precision training will not be usable without mixed precision."
),
)
# Sample limits
limit_test: int = field(
default=0,
metadata=dict(
help="maximum number of elements to use from test set - 0 means no limit"
),
)
# Sample order
reverse_test: bool = field(
default=False, metadata=dict(help="if to reverse sample order of the test set")
)
# Checkpointing
checkpoint_dir: str = field(
default="",
metadata=dict(
help="directory from which checkpoints are loaded and to which they are saved"
),
)
load_checkpoint_dir: str = field(
default="",
metadata=dict(help="directory in which checkpoints are stored"),
)
save_checkpoint_dir: str = field(
default="",
metadata=dict(help="directory to which checkpoints are saved"),
)
checkpoint_secs: int = field(
default=600, metadata=dict(help="checkpoint saving interval in seconds")
)
max_to_keep: int = field(
default=5,
metadata=dict(help="number of checkpoint files to keep - default value is 5"),
)
load_train: str = field(
default="auto",
metadata=dict(
help='what checkpoint to load before starting the training process. "last" for loading most recent epoch checkpoint, "best" for loading best validation loss checkpoint, "init" for initializing a new checkpoint, "auto" for trying several options.'
),
)
load_evaluate: str = field(
default="auto",
metadata=dict(
help='what checkpoint to load for evaluation tasks (test epochs, model export, single file inference, etc). "last" for loading most recent epoch checkpoint, "best" for loading best validation loss checkpoint, "auto" for trying several options.'
),
)
# Transfer Learning
drop_source_layers: int = field(
default=0,
metadata=dict(
help="single integer for how many layers to drop from source model (to drop just output == 1, drop penultimate and output ==2, etc)"
),
)
# Exporting
export_dir: str = field(
default="",
metadata=dict(
help="directory in which exported models are stored - if omitted, the model won't get exported"
),
)
remove_export: bool = field(
default=False, metadata=dict(help="whether to remove old exported models")
)
export_tflite: bool = field(
default=True, metadata=dict(help="export a graph ready for TF Lite engine")
)
export_quantize: bool = field(
default=True,
metadata=dict(help="export a quantized model (optimized for size)"),
)
export_savedmodel: bool = field(
default=False,
metadata=dict(help="export model in TF SavedModel format"),
)
n_steps: int = field(
default=16,
metadata=dict(
help="how many timesteps to process at once by the export graph, higher values mean more latency"
),
)
export_zip: bool = field(
default=False,
metadata=dict(help="export a TFLite model and package with LM and info.json"),
)
export_file_name: str = field(
default="output_graph",
metadata=dict(help="name for the exported model file name"),
)
export_beam_width: int = field(
default=500,
metadata=dict(help="default beam width to embed into exported graph"),
)
# Model metadata
export_author_id: str = field(
default="author",
metadata=dict(
help="author of the exported model. GitHub user or organization name used to uniquely identify the author of this model"
),
)
export_model_name: str = field(
default="model",
metadata=dict(
help="name of the exported model. Must not contain forward slashes."
),
)
export_model_version: str = field(
default="0.0.1",
metadata=dict(
help="semantic version of the exported model. See https://semver.org/. This is fully controlled by you as author of the model and has no required connection with Coqui STT versions"
),
)
def field_val_equals_help(val_desc):
return field(default="<{}>".format(val_desc), metadata=dict(help=val_desc))
export_contact_info: str = field_val_equals_help(
"public contact information of the author. Can be an email address, or a link to a contact form, issue tracker, or discussion forum. Must provide a way to reach the model authors"
)
export_license: str = field_val_equals_help(
"SPDX identifier of the license of the exported model. See https://spdx.org/licenses/. If the license does not have an SPDX identifier, use the license name."
)
export_language: str = field_val_equals_help(
'language the model was trained on - IETF BCP 47 language tag including at least language, script and region subtags. E.g. "en-Latn-UK" or "de-Latn-DE" or "cmn-Hans-CN". Include as much info as you can without loss of precision. For example, if a model is trained on Scottish English, include the variant subtag: "en-Latn-GB-Scotland".'
)
export_min_stt_version: str = field_val_equals_help(
"minimum Coqui STT version (inclusive) the exported model is compatible with"
)
export_max_stt_version: str = field_val_equals_help(
"maximum Coqui STT version (inclusive) the exported model is compatible with"
)
export_description: str = field_val_equals_help(
"Freeform description of the model being exported. Markdown accepted. You can also leave this flag unchanged and edit the generated .md file directly. Useful things to describe are demographic and acoustic characteristics of the data used to train the model, any architectural changes, names of public datasets that were used when applicable, hyperparameters used for training, evaluation results on standard benchmark datasets, etc."
)
# Reporting
log_level: int = field(
default=1,
metadata=dict(
help="log level for console logs - 0: DEBUG, 1: INFO, 2: WARN, 3: ERROR"
),
)
show_progressbar: bool = field(
default=True,
metadata=dict(
help="Show progress for training, validation and testing processes. Log level should be > 0."
),
)
log_placement: bool = field(
default=False,
metadata=dict(
help="whether to log device placement of the operators to the console"
),
)
report_count: int = field(
default=5,
metadata=dict(
help="number of phrases for each of best WER, median WER and worst WER to print out during a WER report"
),
)
summary_dir: str = field(
default="",
metadata=dict(
help='target directory for TensorBoard summaries - defaults to directory "summaries" within the checkpoint folder'
),
)
test_output_file: str = field(
default="",
metadata=dict(
help="path to a file to save all src/decoded/distance/loss tuples generated during a test epoch"
),
)
# Geometry
n_hidden: int = field(
default=2048, metadata=dict(help="layer width to use when initialising layers")
)
layer_norm: bool = field(
default=False,
metadata=dict(
help="wether to use layer-normalization after each fully-connected layer (except the last one)"
),
)
# Initialization
random_seed: int = field(
default=4568,
metadata=dict(help="default random seed that is used to initialize variables"),
)
# Early Stopping
early_stop: bool = field(
default=False,
metadata=dict(
help="Enable early stopping mechanism over validation dataset. If validation is not being run, early stopping is disabled."
),
)
es_epochs: int = field(
default=25,
metadata=dict(
help="Number of epochs with no improvement after which training will be stopped. Loss is not stored in the checkpoint so when checkpoint is revived it starts the loss calculation from start at that point"
),
)
es_min_delta: float = field(
default=0.05,
metadata=dict(
help="Minimum change in loss to qualify as an improvement. This value will also be used in Reduce learning rate on plateau"
),
)
# Reduce learning rate on plateau
reduce_lr_on_plateau: bool = field(
default=False,
metadata=dict(
help="Enable reducing the learning rate if a plateau is reached. This is the case if the validation loss did not improve for some epochs."
),
)
plateau_epochs: int = field(
default=10,
metadata=dict(
help="Number of epochs to consider for RLROP. Has to be smaller than es_epochs from early stopping"
),
)
plateau_reduction: float = field(
default=0.1,
metadata=dict(
help="Multiplicative factor to apply to the current learning rate if a plateau has occurred."
),
)
force_initialize_learning_rate: bool = field(
default=False,
metadata=dict(
help="Force re-initialization of learning rate which was previously reduced."
),
)
# Decoder
bytes_output_mode: bool = field(
default=False,
metadata=dict(
help="enable Bytes Output Mode mode. When this is used the model outputs UTF-8 byte values directly rather than using an alphabet mapping. The --alphabet_config_path option will be ignored. See the training documentation for more details."
),
)
alphabet_config_path: str = field(
default="",
metadata=dict(
help="path to the configuration file specifying the alphabet used by the network. See the comment in data/alphabet.txt for a description of the format."
),
)
scorer_path: str = field(
default="", metadata=dict(help="path to the external scorer file.")
)
beam_width: int = field(
default=1024,
metadata=dict(
help="beam width used in the CTC decoder when building candidate transcriptions"
),
)
# TODO move these defaults into some sort of external (inheritable?) configuration
lm_alpha: float = field(
default=0.931289039105002,
metadata=dict(
help="the alpha hyperparameter of the CTC decoder. Language Model weight."
),
)
lm_beta: float = field(
default=1.1834137581510284,
metadata=dict(
help="the beta hyperparameter of the CTC decoder. Word insertion weight."
),
)
cutoff_prob: float = field(
default=1.0,
metadata=dict(
help="only consider characters until this probability mass is reached. 1.0 = disabled."
),
)
cutoff_top_n: int = field(
default=300,
metadata=dict(
help="only process this number of characters sorted by probability mass for each time step. If bigger than alphabet size, disabled."
),
)
# Inference mode
one_shot_infer: str = field(
default=None,
metadata=dict(
help="one-shot inference mode: specify a wav file and the script will load the checkpoint and perform inference on it."
),
)
# Optimizer mode
lm_alpha_max: int = field(
default=5,
metadata=dict(
help="the maximum of the alpha hyperparameter of the CTC decoder explored during hyperparameter optimization. Language Model weight."
),
)
lm_beta_max: int = field(
default=5,
metadata=dict(
help="the maximum beta hyperparameter of the CTC decoder explored during hyperparameter optimization. Word insertion weight."
),
)
n_trials: int = field(
default=2400,
metadata=dict(
help="the number of trials to run during hyperparameter optimization."
),
)
# sphinx-doc: training_ref_flags_end
def initialize_globals_from_cli():
c = BaseSttConfig.init_from_argparse(arg_prefix="")
_ConfigSingleton._config = c # pylint: disable=protected-access
def initialize_globals_from_args(**override_args):
# Update Config with new args
c = BaseSttConfig(**override_args)
_ConfigSingleton._config = c # pylint: disable=protected-access
def initialize_globals_from_instance(config):
"""Initialize Config singleton from an existing instance"""
_ConfigSingleton._config = config # pylint: disable=protected-access
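# Minimal usage sketch (flag values are hypothetical; note that
# BaseSttConfig.__post_init__ validates alphabet and checkpoint settings, so a
# real call needs those flags set consistently):
#
#   initialize_globals_from_args(
#       train_files=["train.csv"],
#       dev_files=["dev.csv"],
#       test_files=["test.csv"],
#       checkpoint_dir="ckpts/",
#   )
#   print(Config.n_hidden)  # attribute access proxies to the singleton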
# Logging functions
# =================
def prefix_print(prefix, message):
print(prefix + ("\n" + prefix).join(message.split("\n")))
def log_debug(message):
if Config.log_level == 0:
prefix_print("D ", message)
def log_info(message):
if Config.log_level <= 1:
prefix_print("I ", message)
def log_warn(message):
if Config.log_level <= 2:
prefix_print("W ", message)
def log_error(message):
if Config.log_level <= 3:
prefix_print("E ", message)
def create_progressbar(*args, **kwargs):
# Progress bars in stdout by default
if "fd" not in kwargs:
kwargs["fd"] = sys.stdout
if Config.show_progressbar:
return progressbar.ProgressBar(*args, **kwargs)
return progressbar.NullBar(*args, **kwargs)
def log_progress(message):
if not Config.show_progressbar:
log_info(message)
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/far/farextract.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Extracts component FSTs from a finite-state archive.
#include <string>
#include <vector>
#include <fst/flags.h>
#include <fst/extensions/far/farscript.h>
#include <fst/extensions/far/getters.h>
DEFINE_string(filename_prefix, "", "Prefix to prepend to filenames");
DEFINE_string(filename_suffix, "", "Suffix to append to filenames");
DEFINE_int32(generate_filenames, 0,
"Generate N digit numeric filenames (def: use keys)");
DEFINE_string(keys, "",
"Extract set of keys separated by comma (default) "
"including ranges delimited by dash (default)");
DEFINE_string(key_separator, ",", "Separator for individual keys");
DEFINE_string(range_delimiter, "-", "Delimiter for ranges of keys");
int main(int argc, char **argv) {
namespace s = fst::script;
string usage = "Extracts FSTs from a finite-state archive.\n\n Usage:";
usage += argv[0];
usage += " [in1.far in2.far...]\n";
std::set_new_handler(FailedNewHandler);
SET_FLAGS(usage.c_str(), &argc, &argv, true);
s::ExpandArgs(argc, argv, &argc, &argv);
std::vector<string> in_fnames;
for (int i = 1; i < argc; ++i) in_fnames.push_back(argv[i]);
if (in_fnames.empty()) in_fnames.push_back("");
const auto arc_type = s::LoadArcTypeFromFar(in_fnames[0]);
if (arc_type.empty()) return 1;
s::FarExtract(in_fnames, arc_type, FLAGS_generate_filenames, FLAGS_keys,
FLAGS_key_separator, FLAGS_range_delimiter,
FLAGS_filename_prefix, FLAGS_filename_suffix);
return 0;
}
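// Example usage (archive and key names are hypothetical):
//   farextract --filename_prefix=out/ archive.far    (one FST per key, under out/)
//   farextract --keys=a-c,f archive.far              (only keys a through c, and f)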
| 0 |
coqui_public_repos/TTS/TTS/vocoder | coqui_public_repos/TTS/TTS/vocoder/utils/generic_utils.py | from typing import Dict
import numpy as np
import torch
from matplotlib import pyplot as plt
from TTS.tts.utils.visual import plot_spectrogram
from TTS.utils.audio import AudioProcessor
def interpolate_vocoder_input(scale_factor, spec):
"""Interpolate spectrogram by the scale factor.
It is mainly used to match the sampling rates of
the tts and vocoder models.
Args:
scale_factor (float or list): scale factor(s) used to interpolate the spectrogram
spec (np.array): spectrogram to be interpolated
Returns:
torch.tensor: interpolated spectrogram.
"""
print(" > before interpolation :", spec.shape)
spec = torch.tensor(spec).unsqueeze(0).unsqueeze(0) # pylint: disable=not-callable
spec = torch.nn.functional.interpolate(
spec, scale_factor=scale_factor, recompute_scale_factor=True, mode="bilinear", align_corners=False
).squeeze(0)
print(" > after interpolation :", spec.shape)
return spec
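# Usage sketch (rates are illustrative): match a 22050 Hz TTS model to a
# 24000 Hz vocoder by stretching the time axis.
#   scale_factor = [1.0, 24000 / 22050]
#   mel = np.random.rand(80, 100)  # (n_mels, frames)
#   vocoder_input = interpolate_vocoder_input(scale_factor, mel)  # (1, 80, 108)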
def plot_results(y_hat: torch.tensor, y: torch.tensor, ap: AudioProcessor, name_prefix: str = None) -> Dict:
"""Plot the predicted and the real waveform and their spectrograms.
Args:
y_hat (torch.tensor): Predicted waveform.
y (torch.tensor): Real waveform.
ap (AudioProcessor): Audio processor used to process the waveform.
name_prefix (str, optional): Name prefix used to name the figures. Defaults to None.
Returns:
Dict: output figures keyed by the name of the figures.
""" """Plot vocoder model results"""
if name_prefix is None:
name_prefix = ""
# select an instance from batch
y_hat = y_hat[0].squeeze().detach().cpu().numpy()
y = y[0].squeeze().detach().cpu().numpy()
spec_fake = ap.melspectrogram(y_hat).T
spec_real = ap.melspectrogram(y).T
spec_diff = np.abs(spec_fake - spec_real)
# plot figure and save it
fig_wave = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(y)
plt.title("groundtruth speech")
plt.subplot(2, 1, 2)
plt.plot(y_hat)
plt.title("generated speech")
plt.tight_layout()
plt.close()
figures = {
name_prefix + "spectrogram/fake": plot_spectrogram(spec_fake),
name_prefix + "spectrogram/real": plot_spectrogram(spec_real),
name_prefix + "spectrogram/diff": plot_spectrogram(spec_diff),
name_prefix + "speech_comparison": fig_wave,
}
return figures
| 0 |
coqui_public_repos/TTS/TTS/tts | coqui_public_repos/TTS/TTS/tts/models/xtts.py | import os
from dataclasses import dataclass
import librosa
import torch
import torch.nn.functional as F
import torchaudio
from coqpit import Coqpit
from TTS.tts.layers.xtts.gpt import GPT
from TTS.tts.layers.xtts.hifigan_decoder import HifiDecoder
from TTS.tts.layers.xtts.stream_generator import init_stream_support
from TTS.tts.layers.xtts.tokenizer import VoiceBpeTokenizer, split_sentence
from TTS.tts.layers.xtts.xtts_manager import SpeakerManager, LanguageManager
from TTS.tts.models.base_tts import BaseTTS
from TTS.utils.io import load_fsspec
init_stream_support()
def wav_to_mel_cloning(
wav,
mel_norms_file="../experiments/clips_mel_norms.pth",
mel_norms=None,
device=torch.device("cpu"),
n_fft=4096,
hop_length=1024,
win_length=4096,
power=2,
normalized=False,
sample_rate=22050,
f_min=0,
f_max=8000,
n_mels=80,
):
"""
Convert waveform to mel-spectrogram with hard-coded parameters for cloning.
Args:
wav (torch.Tensor): Input waveform tensor.
mel_norms_file (str): Path to mel-spectrogram normalization file.
mel_norms (torch.Tensor): Mel-spectrogram normalization tensor.
device (torch.device): Device to use for computation.
Returns:
torch.Tensor: Mel-spectrogram tensor.
"""
mel_stft = torchaudio.transforms.MelSpectrogram(
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
power=power,
normalized=normalized,
sample_rate=sample_rate,
f_min=f_min,
f_max=f_max,
n_mels=n_mels,
norm="slaney",
).to(device)
wav = wav.to(device)
mel = mel_stft(wav)
mel = torch.log(torch.clamp(mel, min=1e-5))
if mel_norms is None:
mel_norms = torch.load(mel_norms_file, map_location=device)
mel = mel / mel_norms.unsqueeze(0).unsqueeze(-1)
return mel
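# Example (hypothetical input): one second of 22050 Hz audio, i.e. a (1, 22050)
# tensor, yields a normalized mel of shape (1, 80, 22050 // 1024 + 1) = (1, 80, 22)
# with the default hop_length of 1024 (torchaudio pads with center=True).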
def load_audio(audiopath, sampling_rate):
# better load setting following: https://github.com/faroit/python_audio_loading_benchmark
# torchaudio should choose the proper backend to load audio depending on the platform
audio, lsr = torchaudio.load(audiopath)
# stereo to mono if needed
if audio.size(0) != 1:
audio = torch.mean(audio, dim=0, keepdim=True)
if lsr != sampling_rate:
audio = torchaudio.functional.resample(audio, lsr, sampling_rate)
# Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk.
# '10' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds.
if torch.any(audio > 10) or not torch.any(audio < 0):
print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}")
# clip audio invalid values
audio.clip_(-1, 1)
return audio
def pad_or_truncate(t, length):
"""
Ensure a given tensor t has a specified sequence length by either padding it with zeros or clipping it.
Args:
t (torch.Tensor): The input tensor to be padded or truncated.
length (int): The desired length of the tensor.
Returns:
torch.Tensor: The padded or truncated tensor.
"""
tp = t[..., :length]
if t.shape[-1] == length:
tp = t
elif t.shape[-1] < length:
tp = F.pad(t, (0, length - t.shape[-1]))
return tp
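# Example: pad_or_truncate(torch.zeros(1, 5), 8).shape == (1, 8)   (zero-padded)
#          pad_or_truncate(torch.zeros(1, 12), 8).shape == (1, 8)  (clipped)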
@dataclass
class XttsAudioConfig(Coqpit):
"""
Configuration class for audio-related parameters in the XTTS model.
Args:
sample_rate (int): The sample rate in which the GPT operates.
output_sample_rate (int): The sample rate of the output audio waveform.
"""
sample_rate: int = 22050
output_sample_rate: int = 24000
@dataclass
class XttsArgs(Coqpit):
"""A dataclass to represent XTTS model arguments that define the model structure.
Args:
gpt_batch_size (int): The size of the auto-regressive batch.
enable_redaction (bool, optional): Whether to enable redaction. Defaults to False.
kv_cache (bool, optional): Whether to use the kv_cache. Defaults to True.
gpt_checkpoint (str, optional): The checkpoint for the autoregressive model. Defaults to None.
clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None.
decoder_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None.
num_chars (int, optional): The maximum number of characters to generate. Defaults to 255.
For GPT model:
gpt_max_audio_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 605.
gpt_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402.
gpt_max_prompt_tokens (int, optional): The maximum prompt tokens for the autoregressive model. Defaults to 70.
gpt_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30.
gpt_n_model_channels (int, optional): The model dimension for the autoregressive model. Defaults to 1024.
gpt_n_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16.
gpt_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255.
gpt_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255.
gpt_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False.
gpt_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False.
gpt_code_stride_len (int, optional): The hop_size of dvae and consequently of the gpt output. Defaults to 1024.
gpt_use_masking_gt_prompt_approach (bool, optional): If True, it will use ground truth as prompt and it will mask the loss to avoid repetition. Defaults to True.
gpt_use_perceiver_resampler (bool, optional): If True, it will use perceiver resampler from flamingo paper - https://arxiv.org/abs/2204.14198. Defaults to False.
"""
gpt_batch_size: int = 1
enable_redaction: bool = False
kv_cache: bool = True
gpt_checkpoint: str = None
clvp_checkpoint: str = None
decoder_checkpoint: str = None
num_chars: int = 255
# XTTS GPT Encoder params
tokenizer_file: str = ""
gpt_max_audio_tokens: int = 605
gpt_max_text_tokens: int = 402
gpt_max_prompt_tokens: int = 70
gpt_layers: int = 30
gpt_n_model_channels: int = 1024
gpt_n_heads: int = 16
gpt_number_text_tokens: int = None
gpt_start_text_token: int = None
gpt_stop_text_token: int = None
gpt_num_audio_tokens: int = 8194
gpt_start_audio_token: int = 8192
gpt_stop_audio_token: int = 8193
gpt_code_stride_len: int = 1024
gpt_use_masking_gt_prompt_approach: bool = True
gpt_use_perceiver_resampler: bool = False
# HifiGAN Decoder params
input_sample_rate: int = 22050
output_sample_rate: int = 24000
output_hop_length: int = 256
decoder_input_dim: int = 1024
d_vector_dim: int = 512
cond_d_vector_in_each_upsampling_layer: bool = True
# constants
duration_const: int = 102400
class Xtts(BaseTTS):
"""ⓍTTS model implementation.
❗ Currently it only supports inference.
Examples:
>>> from TTS.tts.configs.xtts_config import XttsConfig
>>> from TTS.tts.models.xtts import Xtts
>>> config = XttsConfig()
>>> model = Xtts.inif_from_config(config)
>>> model.load_checkpoint(config, checkpoint_dir="paths/to/models_dir/", eval=True)
"""
def __init__(self, config: Coqpit):
super().__init__(config, ap=None, tokenizer=None)
self.mel_stats_path = None
self.config = config
self.gpt_checkpoint = self.args.gpt_checkpoint
self.decoder_checkpoint = self.args.decoder_checkpoint # TODO: check if this is even needed
self.models_dir = config.model_dir
self.gpt_batch_size = self.args.gpt_batch_size
self.tokenizer = VoiceBpeTokenizer()
self.gpt = None
self.init_models()
self.register_buffer("mel_stats", torch.ones(80))
def init_models(self):
"""Initialize the models. We do it here since we need to load the tokenizer first."""
if self.tokenizer.tokenizer is not None:
self.args.gpt_number_text_tokens = self.tokenizer.get_number_tokens()
self.args.gpt_start_text_token = self.tokenizer.tokenizer.token_to_id("[START]")
self.args.gpt_stop_text_token = self.tokenizer.tokenizer.token_to_id("[STOP]")
if self.args.gpt_number_text_tokens:
self.gpt = GPT(
layers=self.args.gpt_layers,
model_dim=self.args.gpt_n_model_channels,
start_text_token=self.args.gpt_start_text_token,
stop_text_token=self.args.gpt_stop_text_token,
heads=self.args.gpt_n_heads,
max_text_tokens=self.args.gpt_max_text_tokens,
max_mel_tokens=self.args.gpt_max_audio_tokens,
max_prompt_tokens=self.args.gpt_max_prompt_tokens,
number_text_tokens=self.args.gpt_number_text_tokens,
num_audio_tokens=self.args.gpt_num_audio_tokens,
start_audio_token=self.args.gpt_start_audio_token,
stop_audio_token=self.args.gpt_stop_audio_token,
use_perceiver_resampler=self.args.gpt_use_perceiver_resampler,
code_stride_len=self.args.gpt_code_stride_len,
)
self.hifigan_decoder = HifiDecoder(
input_sample_rate=self.args.input_sample_rate,
output_sample_rate=self.args.output_sample_rate,
output_hop_length=self.args.output_hop_length,
ar_mel_length_compression=self.args.gpt_code_stride_len,
decoder_input_dim=self.args.decoder_input_dim,
d_vector_dim=self.args.d_vector_dim,
cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer,
)
@property
def device(self):
return next(self.parameters()).device
@torch.inference_mode()
def get_gpt_cond_latents(self, audio, sr, length: int = 30, chunk_length: int = 6):
"""Compute the conditioning latents for the GPT model from the given audio.
Args:
audio (tensor): audio tensor.
sr (int): Sample rate of the audio.
length (int): Length of the audio in seconds. If < 0, use the whole audio. Defaults to 30.
chunk_length (int): Length of the audio chunks in seconds. When `length == chunk_length`, the whole audio
is used without chunking. It must be <= `length`. Defaults to 6.
"""
if sr != 22050:
audio = torchaudio.functional.resample(audio, sr, 22050)
if length > 0:
audio = audio[:, : 22050 * length]
if self.args.gpt_use_perceiver_resampler:
style_embs = []
for i in range(0, audio.shape[1], 22050 * chunk_length):
audio_chunk = audio[:, i : i + 22050 * chunk_length]
# if the chunk is too short ignore it
if audio_chunk.size(-1) < 22050 * 0.33:
continue
mel_chunk = wav_to_mel_cloning(
audio_chunk,
mel_norms=self.mel_stats.cpu(),
n_fft=2048,
hop_length=256,
win_length=1024,
power=2,
normalized=False,
sample_rate=22050,
f_min=0,
f_max=8000,
n_mels=80,
)
style_emb = self.gpt.get_style_emb(mel_chunk.to(self.device), None)
style_embs.append(style_emb)
# mean style embedding
cond_latent = torch.stack(style_embs).mean(dim=0)
else:
mel = wav_to_mel_cloning(
audio,
mel_norms=self.mel_stats.cpu(),
n_fft=4096,
hop_length=1024,
win_length=4096,
power=2,
normalized=False,
sample_rate=22050,
f_min=0,
f_max=8000,
n_mels=80,
)
cond_latent = self.gpt.get_style_emb(mel.to(self.device))
return cond_latent.transpose(1, 2)
@torch.inference_mode()
def get_speaker_embedding(self, audio, sr):
audio_16k = torchaudio.functional.resample(audio, sr, 16000)
return (
self.hifigan_decoder.speaker_encoder.forward(audio_16k.to(self.device), l2_norm=True)
.unsqueeze(-1)
.to(self.device)
)
@torch.inference_mode()
def get_conditioning_latents(
self,
audio_path,
max_ref_length=30,
gpt_cond_len=6,
gpt_cond_chunk_len=6,
librosa_trim_db=None,
sound_norm_refs=False,
load_sr=22050,
):
"""Get the conditioning latents for the GPT model from the given audio.
Args:
audio_path (str or List[str]): Path to reference audio file(s).
max_ref_length (int): Maximum length of each reference audio in seconds. Defaults to 30.
gpt_cond_len (int): Length of the audio used for gpt latents. Defaults to 6.
gpt_cond_chunk_len (int): Chunk length used for gpt latents. It must be <= gpt_cond_len. Defaults to 6.
librosa_trim_db (int, optional): Trim the audio using this value. If None, not trimming. Defaults to None.
sound_norm_refs (bool, optional): Whether to normalize the audio. Defaults to False.
load_sr (int, optional): Sample rate to load the audio. Defaults to 22050.
"""
# deal with multiples references
if not isinstance(audio_path, list):
audio_paths = [audio_path]
else:
audio_paths = audio_path
speaker_embeddings = []
audios = []
speaker_embedding = None
for file_path in audio_paths:
audio = load_audio(file_path, load_sr)
audio = audio[:, : load_sr * max_ref_length].to(self.device)
if sound_norm_refs:
audio = (audio / torch.abs(audio).max()) * 0.75
if librosa_trim_db is not None:
audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0]
# compute latents for the decoder
speaker_embedding = self.get_speaker_embedding(audio, load_sr)
speaker_embeddings.append(speaker_embedding)
audios.append(audio)
# merge all the audios and compute the latents for the gpt
full_audio = torch.cat(audios, dim=-1)
gpt_cond_latents = self.get_gpt_cond_latents(
full_audio, load_sr, length=gpt_cond_len, chunk_length=gpt_cond_chunk_len
) # [1, 1024, T]
if speaker_embeddings:
speaker_embedding = torch.stack(speaker_embeddings)
speaker_embedding = speaker_embedding.mean(dim=0)
return gpt_cond_latents, speaker_embedding
def synthesize(self, text, config, speaker_wav, language, speaker_id=None, **kwargs):
"""Synthesize speech with the given input text.
Args:
text (str): Input text.
config (XttsConfig): Config with inference parameters.
speaker_wav (list): List of paths to the speaker audio files to be used for cloning.
language (str): Language ID of the speaker.
**kwargs: Inference settings. See `inference()`.
Returns:
A dictionary of the output values with `wav` as output waveform, `deterministic_seed` as seed used at inference,
`text_input` as text token IDs after tokenizer, `voice_samples` as samples used for cloning, `conditioning_latents`
as latents used at inference.
"""
        assert (
            "zh-cn" if language == "zh" else language
        ) in self.config.languages, f" ❗ Language {language} is not supported. Supported languages are {self.config.languages}"
# Use generally found best tuning knobs for generation.
settings = {
"temperature": config.temperature,
"length_penalty": config.length_penalty,
"repetition_penalty": config.repetition_penalty,
"top_k": config.top_k,
"top_p": config.top_p,
}
settings.update(kwargs) # allow overriding of preset settings with kwargs
if speaker_id is not None:
gpt_cond_latent, speaker_embedding = self.speaker_manager.speakers[speaker_id].values()
return self.inference(text, language, gpt_cond_latent, speaker_embedding, **settings)
settings.update({
"gpt_cond_len": config.gpt_cond_len,
"gpt_cond_chunk_len": config.gpt_cond_chunk_len,
"max_ref_len": config.max_ref_len,
"sound_norm_refs": config.sound_norm_refs,
})
return self.full_inference(text, speaker_wav, language, **settings)
@torch.inference_mode()
def full_inference(
self,
text,
ref_audio_path,
language,
# GPT inference
temperature=0.75,
length_penalty=1.0,
repetition_penalty=10.0,
top_k=50,
top_p=0.85,
do_sample=True,
# Cloning
gpt_cond_len=30,
gpt_cond_chunk_len=6,
max_ref_len=10,
sound_norm_refs=False,
**hf_generate_kwargs,
):
"""
This function produces an audio clip of the given text being spoken with the given reference voice.
Args:
text: (str) Text to be spoken.
ref_audio_path: (str) Path to a reference audio file to be used for cloning. This audio file should be >3
seconds long.
language: (str) Language of the voice to be generated.
            temperature: (float) The softmax temperature of the autoregressive model. Defaults to 0.75.
            length_penalty: (float) A length penalty applied to the autoregressive decoder. Higher values cause the
                model to produce terser outputs. Defaults to 1.0.
            repetition_penalty: (float) A penalty that prevents the autoregressive decoder from repeating itself during
                decoding. Can be used to reduce the incidence of long silences or "uhhhhhhs", etc. Defaults to 10.0.
top_k: (int) K value used in top-k sampling. [0,inf]. Lower values mean the decoder produces more "likely"
(aka boring) outputs. Defaults to 50.
            top_p: (float) P value used in nucleus sampling. (0,1]. Lower values mean the decoder produces more "likely"
                (aka boring) outputs. Defaults to 0.85.
            gpt_cond_len: (int) Seconds of audio used for cloning. If the reference audio is shorter, its full length
                is used; otherwise only the first `gpt_cond_len` seconds are used. Defaults to 30 seconds.
gpt_cond_chunk_len: (int) Chunk length used for cloning. It must be <= `gpt_cond_len`.
If gpt_cond_len == gpt_cond_chunk_len, no chunking. Defaults to 6 seconds.
hf_generate_kwargs: (**kwargs) The huggingface Transformers generate API is used for the autoregressive
transformer. Extra keyword args fed to this function get forwarded directly to that API. Documentation
here: https://huggingface.co/docs/transformers/internal/generation_utils
        Returns:
            A dictionary with `wav` as the generated waveform (24kHz, as a numpy array), `gpt_latents` as the GPT
            latents, and `speaker_embedding` as the speaker embedding used for cloning.
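
        Example:
            A minimal sketch, assuming `model` is a loaded Xtts instance and `ref.wav` exists:

            >>> out = model.full_inference("Hello.", "ref.wav", "en", temperature=0.7)
            >>> waveform = out["wav"]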
"""
(gpt_cond_latent, speaker_embedding) = self.get_conditioning_latents(
audio_path=ref_audio_path,
gpt_cond_len=gpt_cond_len,
gpt_cond_chunk_len=gpt_cond_chunk_len,
max_ref_length=max_ref_len,
sound_norm_refs=sound_norm_refs,
)
return self.inference(
text,
language,
gpt_cond_latent,
speaker_embedding,
temperature=temperature,
length_penalty=length_penalty,
repetition_penalty=repetition_penalty,
top_k=top_k,
top_p=top_p,
do_sample=do_sample,
**hf_generate_kwargs,
)
@torch.inference_mode()
def inference(
self,
text,
language,
gpt_cond_latent,
speaker_embedding,
# GPT inference
temperature=0.75,
length_penalty=1.0,
repetition_penalty=10.0,
top_k=50,
top_p=0.85,
do_sample=True,
num_beams=1,
speed=1.0,
enable_text_splitting=False,
**hf_generate_kwargs,
):
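        """Performs inference using precomputed conditioning latents (see `get_conditioning_latents()`).

        Example:
            A minimal sketch, assuming `model` is a loaded Xtts instance and the latents were computed beforehand:

            >>> out = model.inference("Hello.", "en", gpt_cond_latent, speaker_embedding)
            >>> waveform = out["wav"]  # 24kHz waveform as a numpy array
        """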
language = language.split("-")[0] # remove the country code
length_scale = 1.0 / max(speed, 0.05)
gpt_cond_latent = gpt_cond_latent.to(self.device)
speaker_embedding = speaker_embedding.to(self.device)
if enable_text_splitting:
text = split_sentence(text, language, self.tokenizer.char_limits[language])
else:
text = [text]
wavs = []
gpt_latents_list = []
for sent in text:
sent = sent.strip().lower()
text_tokens = torch.IntTensor(self.tokenizer.encode(sent, lang=language)).unsqueeze(0).to(self.device)
            assert (
                text_tokens.shape[-1] < self.args.gpt_max_text_tokens
            ), f" ❗ XTTS can only generate text with a maximum of {self.args.gpt_max_text_tokens} tokens."
with torch.no_grad():
gpt_codes = self.gpt.generate(
cond_latents=gpt_cond_latent,
text_inputs=text_tokens,
input_tokens=None,
do_sample=do_sample,
top_p=top_p,
top_k=top_k,
temperature=temperature,
num_return_sequences=self.gpt_batch_size,
num_beams=num_beams,
length_penalty=length_penalty,
repetition_penalty=repetition_penalty,
output_attentions=False,
**hf_generate_kwargs,
)
expected_output_len = torch.tensor(
[gpt_codes.shape[-1] * self.gpt.code_stride_len], device=text_tokens.device
)
text_len = torch.tensor([text_tokens.shape[-1]], device=self.device)
gpt_latents = self.gpt(
text_tokens,
text_len,
gpt_codes,
expected_output_len,
cond_latents=gpt_cond_latent,
return_attentions=False,
return_latent=True,
)
if length_scale != 1.0:
gpt_latents = F.interpolate(
gpt_latents.transpose(1, 2), scale_factor=length_scale, mode="linear"
).transpose(1, 2)
gpt_latents_list.append(gpt_latents.cpu())
wavs.append(self.hifigan_decoder(gpt_latents, g=speaker_embedding).cpu().squeeze())
return {
"wav": torch.cat(wavs, dim=0).numpy(),
"gpt_latents": torch.cat(gpt_latents_list, dim=1).numpy(),
"speaker_embedding": speaker_embedding,
}
def handle_chunks(self, wav_gen, wav_gen_prev, wav_overlap, overlap_len):
"""Handle chunk formatting in streaming mode"""
wav_chunk = wav_gen[:-overlap_len]
if wav_gen_prev is not None:
wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len) : -overlap_len]
if wav_overlap is not None:
# cross fade the overlap section
if overlap_len > len(wav_chunk):
# wav_chunk is smaller than overlap_len, pass on last wav_gen
if wav_gen_prev is not None:
wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len) :]
else:
                    # not expected to reach here, as the problem only occurs on the last chunk
wav_chunk = wav_gen[-overlap_len:]
return wav_chunk, wav_gen, None
else:
crossfade_wav = wav_chunk[:overlap_len]
crossfade_wav = crossfade_wav * torch.linspace(0.0, 1.0, overlap_len).to(crossfade_wav.device)
wav_chunk[:overlap_len] = wav_overlap * torch.linspace(1.0, 0.0, overlap_len).to(wav_overlap.device)
wav_chunk[:overlap_len] += crossfade_wav
wav_overlap = wav_gen[-overlap_len:]
wav_gen_prev = wav_gen
return wav_chunk, wav_gen_prev, wav_overlap
@torch.inference_mode()
def inference_stream(
self,
text,
language,
gpt_cond_latent,
speaker_embedding,
# Streaming
stream_chunk_size=20,
overlap_wav_len=1024,
# GPT inference
temperature=0.75,
length_penalty=1.0,
repetition_penalty=10.0,
top_k=50,
top_p=0.85,
do_sample=True,
speed=1.0,
enable_text_splitting=False,
**hf_generate_kwargs,
):
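        """Streaming variant of `inference()` that yields audio chunks as they are decoded.

        Example:
            A minimal sketch; `sink` stands in for a hypothetical audio consumer:

            >>> stream = model.inference_stream("Hello.", "en", gpt_cond_latent, speaker_embedding)
            >>> for chunk in stream:
            ...     sink(chunk.cpu().numpy())
        """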
language = language.split("-")[0] # remove the country code
length_scale = 1.0 / max(speed, 0.05)
gpt_cond_latent = gpt_cond_latent.to(self.device)
speaker_embedding = speaker_embedding.to(self.device)
if enable_text_splitting:
text = split_sentence(text, language, self.tokenizer.char_limits[language])
else:
text = [text]
for sent in text:
sent = sent.strip().lower()
text_tokens = torch.IntTensor(self.tokenizer.encode(sent, lang=language)).unsqueeze(0).to(self.device)
            assert (
                text_tokens.shape[-1] < self.args.gpt_max_text_tokens
            ), f" ❗ XTTS can only generate text with a maximum of {self.args.gpt_max_text_tokens} tokens."
fake_inputs = self.gpt.compute_embeddings(
gpt_cond_latent.to(self.device),
text_tokens,
)
gpt_generator = self.gpt.get_generator(
fake_inputs=fake_inputs,
top_k=top_k,
top_p=top_p,
temperature=temperature,
do_sample=do_sample,
num_beams=1,
num_return_sequences=1,
length_penalty=float(length_penalty),
repetition_penalty=float(repetition_penalty),
output_attentions=False,
output_hidden_states=True,
**hf_generate_kwargs,
)
last_tokens = []
all_latents = []
wav_gen_prev = None
wav_overlap = None
is_end = False
while not is_end:
try:
x, latent = next(gpt_generator)
last_tokens += [x]
all_latents += [latent]
except StopIteration:
is_end = True
if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size):
gpt_latents = torch.cat(all_latents, dim=0)[None, :]
if length_scale != 1.0:
gpt_latents = F.interpolate(
gpt_latents.transpose(1, 2), scale_factor=length_scale, mode="linear"
).transpose(1, 2)
wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device))
wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks(
wav_gen.squeeze(), wav_gen_prev, wav_overlap, overlap_wav_len
)
last_tokens = []
yield wav_chunk
def forward(self):
raise NotImplementedError(
"XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training"
)
def eval_step(self):
raise NotImplementedError(
"XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training"
)
@staticmethod
def init_from_config(config: "XttsConfig", **kwargs): # pylint: disable=unused-argument
return Xtts(config)
def eval(self): # pylint: disable=redefined-builtin
"""Sets the model to evaluation mode. Overrides the default eval() method to also set the GPT model to eval mode."""
self.gpt.init_gpt_for_inference()
super().eval()
def get_compatible_checkpoint_state_dict(self, model_path):
checkpoint = load_fsspec(model_path, map_location=torch.device("cpu"))["model"]
# remove xtts gpt trainer extra keys
ignore_keys = ["torch_mel_spectrogram_style_encoder", "torch_mel_spectrogram_dvae", "dvae"]
for key in list(checkpoint.keys()):
# check if it is from the coqui Trainer if so convert it
if key.startswith("xtts."):
new_key = key.replace("xtts.", "")
checkpoint[new_key] = checkpoint[key]
del checkpoint[key]
key = new_key
# remove unused keys
if key.split(".")[0] in ignore_keys:
del checkpoint[key]
return checkpoint
def load_checkpoint(
self,
config,
checkpoint_dir=None,
checkpoint_path=None,
vocab_path=None,
eval=True,
strict=True,
use_deepspeed=False,
speaker_file_path=None,
):
"""
Loads a checkpoint from disk and initializes the model's state and tokenizer.
Args:
config (dict): The configuration dictionary for the model.
checkpoint_dir (str, optional): The directory where the checkpoint is stored. Defaults to None.
checkpoint_path (str, optional): The path to the checkpoint file. Defaults to None.
vocab_path (str, optional): The path to the vocabulary file. Defaults to None.
eval (bool, optional): Whether to set the model to evaluation mode. Defaults to True.
strict (bool, optional): Whether to strictly enforce that the keys in the checkpoint match the keys in the model. Defaults to True.
Returns:
None
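
        Example:
            A minimal sketch; the checkpoint directory path is an assumption:

            >>> model.load_checkpoint(config, checkpoint_dir="/path/to/xtts_run", eval=True)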
"""
model_path = checkpoint_path or os.path.join(checkpoint_dir, "model.pth")
vocab_path = vocab_path or os.path.join(checkpoint_dir, "vocab.json")
speaker_file_path = speaker_file_path or os.path.join(checkpoint_dir, "speakers_xtts.pth")
self.language_manager = LanguageManager(config)
self.speaker_manager = None
if os.path.exists(speaker_file_path):
self.speaker_manager = SpeakerManager(speaker_file_path)
if os.path.exists(vocab_path):
self.tokenizer = VoiceBpeTokenizer(vocab_file=vocab_path)
self.init_models()
checkpoint = self.get_compatible_checkpoint_state_dict(model_path)
        # deal with v1 and v1.1. V1 has the init_gpt_for_inference keys, v1.1 does not
try:
self.load_state_dict(checkpoint, strict=strict)
        except Exception:  # v1 checkpoints need the GPT set up for inference before loading
if eval:
self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cache)
self.load_state_dict(checkpoint, strict=strict)
if eval:
self.hifigan_decoder.eval()
self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cache, use_deepspeed=use_deepspeed)
self.gpt.eval()
def train_step(self):
raise NotImplementedError(
"XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training"
)
| 0 |
coqui_public_repos/STT/native_client/java/app/src/main/res | coqui_public_repos/STT/native_client/java/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml | <?xml version="1.0" encoding="utf-8"?>
<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
<background android:drawable="@drawable/ic_launcher_background" />
<foreground android:drawable="@drawable/ic_launcher_foreground" />
</adaptive-icon>
| 0 |
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external | coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidjson/encodedstream.h | // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef CEREAL_RAPIDJSON_ENCODEDSTREAM_H_
#define CEREAL_RAPIDJSON_ENCODEDSTREAM_H_
#include "stream.h"
#include "memorystream.h"
#ifdef __GNUC__
CEREAL_RAPIDJSON_DIAG_PUSH
CEREAL_RAPIDJSON_DIAG_OFF(effc++)
#endif
#ifdef __clang__
CEREAL_RAPIDJSON_DIAG_PUSH
CEREAL_RAPIDJSON_DIAG_OFF(padded)
#endif
CEREAL_RAPIDJSON_NAMESPACE_BEGIN
//! Input byte stream wrapper with a statically bound encoding.
/*!
\tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
\tparam InputByteStream Type of input byte stream. For example, FileReadStream.
*/
template <typename Encoding, typename InputByteStream>
class EncodedInputStream {
CEREAL_RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
public:
typedef typename Encoding::Ch Ch;
EncodedInputStream(InputByteStream& is) : is_(is) {
current_ = Encoding::TakeBOM(is_);
}
Ch Peek() const { return current_; }
Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; }
size_t Tell() const { return is_.Tell(); }
// Not implemented
void Put(Ch) { CEREAL_RAPIDJSON_ASSERT(false); }
void Flush() { CEREAL_RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
private:
EncodedInputStream(const EncodedInputStream&);
EncodedInputStream& operator=(const EncodedInputStream&);
InputByteStream& is_;
Ch current_;
};
//! Specialized for UTF8 MemoryStream.
template <>
class EncodedInputStream<UTF8<>, MemoryStream> {
public:
typedef UTF8<>::Ch Ch;
EncodedInputStream(MemoryStream& is) : is_(is) {
if (static_cast<unsigned char>(is_.Peek()) == 0xEFu) is_.Take();
if (static_cast<unsigned char>(is_.Peek()) == 0xBBu) is_.Take();
if (static_cast<unsigned char>(is_.Peek()) == 0xBFu) is_.Take();
}
Ch Peek() const { return is_.Peek(); }
Ch Take() { return is_.Take(); }
size_t Tell() const { return is_.Tell(); }
// Not implemented
void Put(Ch) {}
void Flush() {}
Ch* PutBegin() { return 0; }
size_t PutEnd(Ch*) { return 0; }
MemoryStream& is_;
private:
EncodedInputStream(const EncodedInputStream&);
EncodedInputStream& operator=(const EncodedInputStream&);
};
//! Output byte stream wrapper with statically bound encoding.
/*!
\tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
\tparam OutputByteStream Type of input byte stream. For example, FileWriteStream.
*/
template <typename Encoding, typename OutputByteStream>
class EncodedOutputStream {
CEREAL_RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
public:
typedef typename Encoding::Ch Ch;
EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
if (putBOM)
Encoding::PutBOM(os_);
}
void Put(Ch c) { Encoding::Put(os_, c); }
void Flush() { os_.Flush(); }
// Not implemented
Ch Peek() const { CEREAL_RAPIDJSON_ASSERT(false); return 0;}
Ch Take() { CEREAL_RAPIDJSON_ASSERT(false); return 0;}
size_t Tell() const { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
Ch* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
private:
EncodedOutputStream(const EncodedOutputStream&);
EncodedOutputStream& operator=(const EncodedOutputStream&);
OutputByteStream& os_;
};
#define CEREAL_RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
//! Input stream wrapper with dynamically bound encoding and automatic encoding detection.
/*!
\tparam CharType Type of character for reading.
\tparam InputByteStream type of input byte stream to be wrapped.
*/
template <typename CharType, typename InputByteStream>
class AutoUTFInputStream {
CEREAL_RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
public:
typedef CharType Ch;
//! Constructor.
/*!
\param is input stream to be wrapped.
\param type UTF encoding type if it is not detected from the stream.
*/
AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) {
CEREAL_RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
DetectType();
static const TakeFunc f[] = { CEREAL_RAPIDJSON_ENCODINGS_FUNC(Take) };
takeFunc_ = f[type_];
current_ = takeFunc_(*is_);
}
UTFType GetType() const { return type_; }
bool HasBOM() const { return hasBOM_; }
Ch Peek() const { return current_; }
Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; }
size_t Tell() const { return is_->Tell(); }
// Not implemented
void Put(Ch) { CEREAL_RAPIDJSON_ASSERT(false); }
void Flush() { CEREAL_RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
private:
AutoUTFInputStream(const AutoUTFInputStream&);
AutoUTFInputStream& operator=(const AutoUTFInputStream&);
// Detect encoding type with BOM or RFC 4627
void DetectType() {
// BOM (Byte Order Mark):
// 00 00 FE FF UTF-32BE
// FF FE 00 00 UTF-32LE
// FE FF UTF-16BE
// FF FE UTF-16LE
// EF BB BF UTF-8
const unsigned char* c = reinterpret_cast<const unsigned char *>(is_->Peek4());
if (!c)
return;
unsigned bom = static_cast<unsigned>(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24));
hasBOM_ = false;
if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); }
else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); }
else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); }
// RFC 4627: Section 3
// "Since the first two characters of a JSON text will always be ASCII
// characters [RFC0020], it is possible to determine whether an octet
// stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
// at the pattern of nulls in the first four octets."
// 00 00 00 xx UTF-32BE
// 00 xx 00 xx UTF-16BE
// xx 00 00 00 UTF-32LE
// xx 00 xx 00 UTF-16LE
// xx xx xx xx UTF-8
if (!hasBOM_) {
int pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0);
switch (pattern) {
case 0x08: type_ = kUTF32BE; break;
case 0x0A: type_ = kUTF16BE; break;
case 0x01: type_ = kUTF32LE; break;
case 0x05: type_ = kUTF16LE; break;
case 0x0F: type_ = kUTF8; break;
default: break; // Use type defined by user.
}
}
        // Runtime check whether the size of character type is sufficient. It only performs checks with assertions.
if (type_ == kUTF16LE || type_ == kUTF16BE) CEREAL_RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
if (type_ == kUTF32LE || type_ == kUTF32BE) CEREAL_RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
}
typedef Ch (*TakeFunc)(InputByteStream& is);
InputByteStream* is_;
UTFType type_;
Ch current_;
TakeFunc takeFunc_;
bool hasBOM_;
};
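// Example (illustrative sketch): parsing a JSON file whose UTF encoding is
// detected at runtime. Assumes `bis` is a FileReadStream (from
// filereadstream.h) wrapping an open FILE*:
//
//   AutoUTFInputStream<unsigned, FileReadStream> eis(bis); // Detects encoding from BOM or content.
//   Document d;                                            // Document is GenericDocument<UTF8<>>.
//   d.ParseStream<0, AutoUTF<unsigned>>(eis);              // Transcodes the input to UTF-8.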
//! Output stream wrapper with dynamically bound encoding and automatic encoding detection.
/*!
\tparam CharType Type of character for writing.
\tparam OutputByteStream type of output byte stream to be wrapped.
*/
template <typename CharType, typename OutputByteStream>
class AutoUTFOutputStream {
CEREAL_RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
public:
typedef CharType Ch;
//! Constructor.
/*!
\param os output stream to be wrapped.
\param type UTF encoding type.
\param putBOM Whether to write BOM at the beginning of the stream.
*/
AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) {
CEREAL_RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
        // Runtime check whether the size of character type is sufficient. It only performs checks with assertions.
if (type_ == kUTF16LE || type_ == kUTF16BE) CEREAL_RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
if (type_ == kUTF32LE || type_ == kUTF32BE) CEREAL_RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
static const PutFunc f[] = { CEREAL_RAPIDJSON_ENCODINGS_FUNC(Put) };
putFunc_ = f[type_];
if (putBOM)
PutBOM();
}
UTFType GetType() const { return type_; }
void Put(Ch c) { putFunc_(*os_, c); }
void Flush() { os_->Flush(); }
// Not implemented
Ch Peek() const { CEREAL_RAPIDJSON_ASSERT(false); return 0;}
Ch Take() { CEREAL_RAPIDJSON_ASSERT(false); return 0;}
size_t Tell() const { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
Ch* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; }
private:
AutoUTFOutputStream(const AutoUTFOutputStream&);
AutoUTFOutputStream& operator=(const AutoUTFOutputStream&);
void PutBOM() {
typedef void (*PutBOMFunc)(OutputByteStream&);
static const PutBOMFunc f[] = { CEREAL_RAPIDJSON_ENCODINGS_FUNC(PutBOM) };
f[type_](*os_);
}
typedef void (*PutFunc)(OutputByteStream&, Ch);
OutputByteStream* os_;
UTFType type_;
PutFunc putFunc_;
};
#undef CEREAL_RAPIDJSON_ENCODINGS_FUNC
CEREAL_RAPIDJSON_NAMESPACE_END
#ifdef __clang__
CEREAL_RAPIDJSON_DIAG_POP
#endif
#ifdef __GNUC__
CEREAL_RAPIDJSON_DIAG_POP
#endif
#endif // CEREAL_RAPIDJSON_FILESTREAM_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/replace-util.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Utility classes for the recursive replacement of FSTs (RTNs).
#ifndef FST_REPLACE_UTIL_H_
#define FST_REPLACE_UTIL_H_
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <fst/log.h>
#include <fst/connect.h>
#include <fst/mutable-fst.h>
#include <fst/topsort.h>
#include <fst/vector-fst.h>
namespace fst {
// This specifies what labels to output on the call or return arc. Note that
// REPLACE_LABEL_INPUT and REPLACE_LABEL_OUTPUT will produce transducers when
// applied to acceptors.
enum ReplaceLabelType {
// Epsilon labels on both input and output.
REPLACE_LABEL_NEITHER = 1,
// Non-epsilon labels on input and epsilon on output.
REPLACE_LABEL_INPUT = 2,
// Epsilon on input and non-epsilon on output.
REPLACE_LABEL_OUTPUT = 3,
// Non-epsilon labels on both input and output.
REPLACE_LABEL_BOTH = 4
};
// By default ReplaceUtil will copy the input label of the replace arc.
// The call_label_type and return_label_type options specify how to manage
// the labels of the call arc and the return arc of the replace FST.
struct ReplaceUtilOptions {
int64 root; // Root rule for expansion.
ReplaceLabelType call_label_type; // How to label call arc.
ReplaceLabelType return_label_type; // How to label return arc.
int64 return_label; // Label to put on return arc.
explicit ReplaceUtilOptions(
int64 root = kNoLabel,
ReplaceLabelType call_label_type = REPLACE_LABEL_INPUT,
ReplaceLabelType return_label_type = REPLACE_LABEL_NEITHER,
int64 return_label = 0)
: root(root),
call_label_type(call_label_type),
return_label_type(return_label_type),
return_label(return_label) {}
// For backwards compatibility.
ReplaceUtilOptions(int64 root, bool epsilon_replace_arc)
: ReplaceUtilOptions(root,
epsilon_replace_arc ? REPLACE_LABEL_NEITHER
: REPLACE_LABEL_INPUT) {}
};
// Every non-terminal on a path appears as the first label on that path in every
// FST associated with a given SCC of the replace dependency graph. This would
// be true if the SCC were formed from left-linear grammar rules.
constexpr uint8 kReplaceSCCLeftLinear = 0x01;
// Every non-terminal on a path appears as the final label on that path in every
// FST associated with a given SCC of the replace dependency graph. This would
// be true if the SCC were formed from right-linear grammar rules.
constexpr uint8 kReplaceSCCRightLinear = 0x02;
// The SCC in the replace dependency graph has more than one state or a
// self-loop.
constexpr uint8 kReplaceSCCNonTrivial = 0x04;
// Defined in replace.h.
template <class Arc>
void Replace(
const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &,
MutableFst<Arc> *, const ReplaceUtilOptions &);
// Utility class for the recursive replacement of FSTs (RTNs). The user provides
// a set of label/FST pairs at construction. These are used by methods for
// testing cyclic dependencies and connectedness and doing RTN connection and
// specific FST replacement by label or for various optimization properties. The
// modified results can be obtained with the GetFstPairs() or
// GetMutableFstPairs() methods.
template <class Arc>
class ReplaceUtil {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using FstPair = std::pair<Label, const Fst<Arc> *>;
using MutableFstPair = std::pair<Label, MutableFst<Arc> *>;
using NonTerminalHash = std::unordered_map<Label, Label>;
// Constructs from mutable FSTs; FST ownership is given to ReplaceUtil.
ReplaceUtil(const std::vector<MutableFstPair> &fst_pairs,
const ReplaceUtilOptions &opts);
// Constructs from FSTs; FST ownership is retained by caller.
ReplaceUtil(const std::vector<FstPair> &fst_pairs,
const ReplaceUtilOptions &opts);
// Constructs from ReplaceFst internals; FST ownership is retained by caller.
ReplaceUtil(const std::vector<std::unique_ptr<const Fst<Arc>>> &fst_array,
const NonTerminalHash &nonterminal_hash,
const ReplaceUtilOptions &opts);
~ReplaceUtil() {
for (Label i = 0; i < fst_array_.size(); ++i) delete fst_array_[i];
}
// True if the non-terminal dependencies are cyclic. Cyclic dependencies will
// result in an unexpandable FST.
bool CyclicDependencies() const {
GetDependencies(false);
return depprops_ & kCyclic;
}
  // Returns the strongly-connected component ID in the dependency graph of the
  // replace FSTs.
StateId SCC(Label label) const {
GetDependencies(false);
const auto it = nonterminal_hash_.find(label);
if (it == nonterminal_hash_.end()) return kNoStateId;
return depscc_[it->second];
}
// Returns properties for the strongly-connected component in the dependency
// graph of the replace FSTs. If the SCC is kReplaceSCCLeftLinear or
// kReplaceSCCRightLinear, that SCC can be represented as finite-state despite
// any cyclic dependencies, but not by the usual replacement operation (see
// fst/extensions/pdt/replace.h).
uint8 SCCProperties(StateId scc_id) {
GetSCCProperties();
return depsccprops_[scc_id];
}
// Returns true if no useless FSTs, states or transitions are present in the
// RTN.
bool Connected() const {
GetDependencies(false);
uint64 props = kAccessible | kCoAccessible;
for (Label i = 0; i < fst_array_.size(); ++i) {
if (!fst_array_[i]) continue;
if (fst_array_[i]->Properties(props, true) != props || !depaccess_[i]) {
return false;
}
}
return true;
}
// Removes useless FSTs, states and transitions from the RTN.
void Connect();
// Replaces FSTs specified by labels, unless there are cyclic dependencies.
void ReplaceLabels(const std::vector<Label> &labels);
// Replaces FSTs that have at most nstates states, narcs arcs and nnonterm
// non-terminals (updating in reverse dependency order), unless there are
// cyclic dependencies.
void ReplaceBySize(size_t nstates, size_t narcs, size_t nnonterms);
  // Replaces singleton FSTs, unless there are cyclic dependencies.
void ReplaceTrivial() { ReplaceBySize(2, 1, 1); }
// Replaces non-terminals that have at most ninstances instances (updating in
// dependency order), unless there are cyclic dependencies.
void ReplaceByInstances(size_t ninstances);
// Replaces non-terminals that have only one instance, unless there are cyclic
// dependencies.
void ReplaceUnique() { ReplaceByInstances(1); }
// Returns label/FST pairs, retaining FST ownership.
void GetFstPairs(std::vector<FstPair> *fst_pairs);
// Returns label/mutable FST pairs, giving FST ownership over to the caller.
void GetMutableFstPairs(std::vector<MutableFstPair> *mutable_fst_pairs);
private:
// FST statistics.
struct ReplaceStats {
StateId nstates; // Number of states.
StateId nfinal; // Number of final states.
size_t narcs; // Number of arcs.
Label nnonterms; // Number of non-terminals in FST.
size_t nref; // Number of non-terminal instances referring to this FST.
// Number of times that ith FST references this FST
std::map<Label, size_t> inref;
// Number of times that this FST references the ith FST
std::map<Label, size_t> outref;
ReplaceStats() : nstates(0), nfinal(0), narcs(0), nnonterms(0), nref(0) {}
};
// Checks that Mutable FSTs exists, creating them if necessary.
void CheckMutableFsts();
// Computes the dependency graph for the RTN, computing dependency statistics
// if stats is true.
void GetDependencies(bool stats) const;
void ClearDependencies() const {
depfst_.DeleteStates();
stats_.clear();
depprops_ = 0;
depsccprops_.clear();
have_stats_ = false;
}
// Gets topological order of dependencies, returning false with cyclic input.
bool GetTopOrder(const Fst<Arc> &fst, std::vector<Label> *toporder) const;
// Updates statistics to reflect the replacement of the jth FST.
void UpdateStats(Label j);
// Computes the properties for the strongly-connected component in the
// dependency graph of the replace FSTs.
void GetSCCProperties() const;
Label root_label_; // Root non-terminal.
Label root_fst_; // Root FST ID.
ReplaceLabelType call_label_type_; // See Replace().
ReplaceLabelType return_label_type_; // See Replace().
int64 return_label_; // See Replace().
std::vector<const Fst<Arc> *> fst_array_; // FST per ID.
std::vector<MutableFst<Arc> *> mutable_fst_array_; // Mutable FST per ID.
std::vector<Label> nonterminal_array_; // FST ID to non-terminal.
NonTerminalHash nonterminal_hash_; // Non-terminal to FST ID.
mutable VectorFst<Arc> depfst_; // FST ID dependencies.
mutable std::vector<StateId> depscc_; // FST SCC ID.
mutable std::vector<bool> depaccess_; // FST ID accessibility.
mutable uint64 depprops_; // Dependency FST props.
mutable bool have_stats_; // Have dependency statistics?
mutable std::vector<ReplaceStats> stats_; // Per-FST statistics.
mutable std::vector<uint8> depsccprops_; // SCC properties.
ReplaceUtil(const ReplaceUtil &) = delete;
ReplaceUtil &operator=(const ReplaceUtil &) = delete;
};
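// Example (illustrative sketch): pruning and simplifying an RTN. The label/FST
// pairs and the root label are assumptions:
//
//   std::vector<ReplaceUtil<StdArc>::FstPair> fst_pairs;
//   // ... fill fst_pairs with (non-terminal label, FST) entries ...
//   ReplaceUtil<StdArc> util(fst_pairs, ReplaceUtilOptions(root_label));
//   if (!util.CyclicDependencies()) util.ReplaceTrivial(); // Inline singleton FSTs.
//   util.Connect();                                        // Remove useless FSTs, states and transitions.
//   util.GetFstPairs(&fst_pairs);                          // Retrieve the modified pairs.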
template <class Arc>
ReplaceUtil<Arc>::ReplaceUtil(const std::vector<MutableFstPair> &fst_pairs,
const ReplaceUtilOptions &opts)
: root_label_(opts.root),
call_label_type_(opts.call_label_type),
return_label_type_(opts.return_label_type),
return_label_(opts.return_label),
depprops_(0),
have_stats_(false) {
fst_array_.push_back(nullptr);
mutable_fst_array_.push_back(nullptr);
nonterminal_array_.push_back(kNoLabel);
for (const auto &fst_pair : fst_pairs) {
const auto label = fst_pair.first;
auto *fst = fst_pair.second;
nonterminal_hash_[label] = fst_array_.size();
nonterminal_array_.push_back(label);
fst_array_.push_back(fst);
mutable_fst_array_.push_back(fst);
}
root_fst_ = nonterminal_hash_[root_label_];
if (!root_fst_) {
FSTERROR() << "ReplaceUtil: No root FST for label: " << root_label_;
}
}
template <class Arc>
ReplaceUtil<Arc>::ReplaceUtil(const std::vector<FstPair> &fst_pairs,
const ReplaceUtilOptions &opts)
: root_label_(opts.root),
call_label_type_(opts.call_label_type),
return_label_type_(opts.return_label_type),
return_label_(opts.return_label),
depprops_(0),
have_stats_(false) {
fst_array_.push_back(nullptr);
nonterminal_array_.push_back(kNoLabel);
for (const auto &fst_pair : fst_pairs) {
const auto label = fst_pair.first;
const auto *fst = fst_pair.second;
nonterminal_hash_[label] = fst_array_.size();
nonterminal_array_.push_back(label);
fst_array_.push_back(fst->Copy());
}
root_fst_ = nonterminal_hash_[root_label_];
if (!root_fst_) {
FSTERROR() << "ReplaceUtil: No root FST for label: " << root_label_;
}
}
template <class Arc>
ReplaceUtil<Arc>::ReplaceUtil(
const std::vector<std::unique_ptr<const Fst<Arc>>> &fst_array,
const NonTerminalHash &nonterminal_hash, const ReplaceUtilOptions &opts)
: root_fst_(opts.root),
call_label_type_(opts.call_label_type),
return_label_type_(opts.return_label_type),
return_label_(opts.return_label),
nonterminal_array_(fst_array.size()),
nonterminal_hash_(nonterminal_hash),
depprops_(0),
have_stats_(false) {
fst_array_.push_back(nullptr);
for (size_t i = 1; i < fst_array.size(); ++i) {
fst_array_.push_back(fst_array[i]->Copy());
}
for (auto it = nonterminal_hash.begin(); it != nonterminal_hash.end(); ++it) {
nonterminal_array_[it->second] = it->first;
}
root_label_ = nonterminal_array_[root_fst_];
}
template <class Arc>
void ReplaceUtil<Arc>::GetDependencies(bool stats) const {
if (depfst_.NumStates() > 0) {
if (stats && !have_stats_) {
ClearDependencies();
} else {
return;
}
}
have_stats_ = stats;
if (have_stats_) stats_.reserve(fst_array_.size());
for (Label i = 0; i < fst_array_.size(); ++i) {
depfst_.AddState();
depfst_.SetFinal(i, Weight::One());
if (have_stats_) stats_.push_back(ReplaceStats());
}
depfst_.SetStart(root_fst_);
// An arc from each state (representing the FST) to the state representing the
// FST being replaced
for (Label i = 0; i < fst_array_.size(); ++i) {
const auto *ifst = fst_array_[i];
if (!ifst) continue;
for (StateIterator<Fst<Arc>> siter(*ifst); !siter.Done(); siter.Next()) {
const auto s = siter.Value();
if (have_stats_) {
++stats_[i].nstates;
if (ifst->Final(s) != Weight::Zero()) ++stats_[i].nfinal;
}
for (ArcIterator<Fst<Arc>> aiter(*ifst, s); !aiter.Done();
aiter.Next()) {
if (have_stats_) ++stats_[i].narcs;
const auto &arc = aiter.Value();
auto it = nonterminal_hash_.find(arc.olabel);
if (it != nonterminal_hash_.end()) {
const auto j = it->second;
depfst_.AddArc(i, Arc(arc.olabel, arc.olabel, Weight::One(), j));
if (have_stats_) {
++stats_[i].nnonterms;
++stats_[j].nref;
++stats_[j].inref[i];
++stats_[i].outref[j];
}
}
}
}
}
// Computes accessibility info.
SccVisitor<Arc> scc_visitor(&depscc_, &depaccess_, nullptr, &depprops_);
DfsVisit(depfst_, &scc_visitor);
}
template <class Arc>
void ReplaceUtil<Arc>::UpdateStats(Label j) {
if (!have_stats_) {
FSTERROR() << "ReplaceUtil::UpdateStats: Stats not available";
return;
}
if (j == root_fst_) return; // Can't replace root.
for (auto in = stats_[j].inref.begin(); in != stats_[j].inref.end(); ++in) {
const auto i = in->first;
const auto ni = in->second;
stats_[i].nstates += stats_[j].nstates * ni;
stats_[i].narcs += (stats_[j].narcs + 1) * ni;
stats_[i].nnonterms += (stats_[j].nnonterms - 1) * ni;
stats_[i].outref.erase(j);
for (auto out = stats_[j].outref.begin(); out != stats_[j].outref.end();
++out) {
const auto k = out->first;
const auto nk = out->second;
stats_[i].outref[k] += ni * nk;
}
}
for (auto out = stats_[j].outref.begin(); out != stats_[j].outref.end();
++out) {
const auto k = out->first;
const auto nk = out->second;
stats_[k].nref -= nk;
stats_[k].inref.erase(j);
for (auto in = stats_[j].inref.begin(); in != stats_[j].inref.end(); ++in) {
const auto i = in->first;
const auto ni = in->second;
stats_[k].inref[i] += ni * nk;
stats_[k].nref += ni * nk;
}
}
}
template <class Arc>
void ReplaceUtil<Arc>::CheckMutableFsts() {
if (mutable_fst_array_.empty()) {
for (Label i = 0; i < fst_array_.size(); ++i) {
if (!fst_array_[i]) {
mutable_fst_array_.push_back(nullptr);
} else {
mutable_fst_array_.push_back(new VectorFst<Arc>(*fst_array_[i]));
delete fst_array_[i];
fst_array_[i] = mutable_fst_array_[i];
}
}
}
}
template <class Arc>
void ReplaceUtil<Arc>::Connect() {
CheckMutableFsts();
static constexpr auto props = kAccessible | kCoAccessible;
for (auto *mutable_fst : mutable_fst_array_) {
if (!mutable_fst) continue;
if (mutable_fst->Properties(props, false) != props) {
fst::Connect(mutable_fst);
}
}
GetDependencies(false);
for (Label i = 0; i < mutable_fst_array_.size(); ++i) {
auto *fst = mutable_fst_array_[i];
if (fst && !depaccess_[i]) {
delete fst;
fst_array_[i] = nullptr;
mutable_fst_array_[i] = nullptr;
}
}
ClearDependencies();
}
template <class Arc>
bool ReplaceUtil<Arc>::GetTopOrder(const Fst<Arc> &fst,
std::vector<Label> *toporder) const {
// Finds topological order of dependencies.
std::vector<StateId> order;
bool acyclic = false;
TopOrderVisitor<Arc> top_order_visitor(&order, &acyclic);
DfsVisit(fst, &top_order_visitor);
if (!acyclic) {
LOG(WARNING) << "ReplaceUtil::GetTopOrder: Cyclical label dependencies";
return false;
}
toporder->resize(order.size());
for (Label i = 0; i < order.size(); ++i) (*toporder)[order[i]] = i;
return true;
}
template <class Arc>
void ReplaceUtil<Arc>::ReplaceLabels(const std::vector<Label> &labels) {
CheckMutableFsts();
std::unordered_set<Label> label_set;
for (const auto label : labels) {
// Can't replace root.
if (label != root_label_) label_set.insert(label);
}
// Finds FST dependencies restricted to the labels requested.
GetDependencies(false);
VectorFst<Arc> pfst(depfst_);
for (StateId i = 0; i < pfst.NumStates(); ++i) {
std::vector<Arc> arcs;
for (ArcIterator<VectorFst<Arc>> aiter(pfst, i); !aiter.Done();
aiter.Next()) {
const auto &arc = aiter.Value();
const auto label = nonterminal_array_[arc.nextstate];
if (label_set.count(label) > 0) arcs.push_back(arc);
}
pfst.DeleteArcs(i);
for (const auto &arc : arcs) pfst.AddArc(i, arc);
}
std::vector<Label> toporder;
if (!GetTopOrder(pfst, &toporder)) {
ClearDependencies();
return;
}
// Visits FSTs in reverse topological order of dependencies and performs
// replacements.
for (Label o = toporder.size() - 1; o >= 0; --o) {
std::vector<FstPair> fst_pairs;
auto s = toporder[o];
for (ArcIterator<VectorFst<Arc>> aiter(pfst, s); !aiter.Done();
aiter.Next()) {
const auto &arc = aiter.Value();
const auto label = nonterminal_array_[arc.nextstate];
const auto *fst = fst_array_[arc.nextstate];
fst_pairs.push_back(std::make_pair(label, fst));
}
if (fst_pairs.empty()) continue;
const auto label = nonterminal_array_[s];
const auto *fst = fst_array_[s];
fst_pairs.push_back(std::make_pair(label, fst));
const ReplaceUtilOptions opts(label, call_label_type_, return_label_type_,
return_label_);
Replace(fst_pairs, mutable_fst_array_[s], opts);
}
ClearDependencies();
}
template <class Arc>
void ReplaceUtil<Arc>::ReplaceBySize(size_t nstates, size_t narcs,
size_t nnonterms) {
std::vector<Label> labels;
GetDependencies(true);
std::vector<Label> toporder;
if (!GetTopOrder(depfst_, &toporder)) {
ClearDependencies();
return;
}
for (Label o = toporder.size() - 1; o >= 0; --o) {
const auto j = toporder[o];
if (stats_[j].nstates <= nstates && stats_[j].narcs <= narcs &&
stats_[j].nnonterms <= nnonterms) {
labels.push_back(nonterminal_array_[j]);
UpdateStats(j);
}
}
ReplaceLabels(labels);
}
template <class Arc>
void ReplaceUtil<Arc>::ReplaceByInstances(size_t ninstances) {
std::vector<Label> labels;
GetDependencies(true);
std::vector<Label> toporder;
if (!GetTopOrder(depfst_, &toporder)) {
ClearDependencies();
return;
}
for (Label o = 0; o < toporder.size(); ++o) {
const auto j = toporder[o];
if (stats_[j].nref <= ninstances) {
labels.push_back(nonterminal_array_[j]);
UpdateStats(j);
}
}
ReplaceLabels(labels);
}
template <class Arc>
void ReplaceUtil<Arc>::GetFstPairs(std::vector<FstPair> *fst_pairs) {
CheckMutableFsts();
fst_pairs->clear();
for (Label i = 0; i < fst_array_.size(); ++i) {
const auto label = nonterminal_array_[i];
const auto *fst = fst_array_[i];
if (!fst) continue;
fst_pairs->push_back(std::make_pair(label, fst));
}
}
template <class Arc>
void ReplaceUtil<Arc>::GetMutableFstPairs(
std::vector<MutableFstPair> *mutable_fst_pairs) {
CheckMutableFsts();
mutable_fst_pairs->clear();
for (Label i = 0; i < mutable_fst_array_.size(); ++i) {
const auto label = nonterminal_array_[i];
const auto *fst = mutable_fst_array_[i];
if (!fst) continue;
mutable_fst_pairs->push_back(std::make_pair(label, fst->Copy()));
}
}
template <class Arc>
void ReplaceUtil<Arc>::GetSCCProperties() const {
if (!depsccprops_.empty()) return;
GetDependencies(false);
if (depscc_.empty()) return;
for (StateId scc = 0; scc < depscc_.size(); ++scc) {
depsccprops_.push_back(kReplaceSCCLeftLinear | kReplaceSCCRightLinear);
}
if (!(depprops_ & kCyclic)) return; // No cyclic dependencies.
// Checks for self-loops in the dependency graph.
for (StateId scc = 0; scc < depscc_.size(); ++scc) {
for (ArcIterator<Fst<Arc> > aiter(depfst_, scc);
!aiter.Done(); aiter.Next()) {
const auto &arc = aiter.Value();
if (arc.nextstate == scc) { // SCC has a self loop.
depsccprops_[scc] |= kReplaceSCCNonTrivial;
}
}
}
std::vector<bool> depscc_visited(depscc_.size(), false);
for (Label i = 0; i < fst_array_.size(); ++i) {
const auto *fst = fst_array_[i];
if (!fst) continue;
const auto depscc = depscc_[i];
if (depscc_visited[depscc]) { // SCC has more than one state.
depsccprops_[depscc] |= kReplaceSCCNonTrivial;
}
depscc_visited[depscc] = true;
std::vector<StateId> fstscc; // SCCs of the current FST.
uint64 fstprops;
SccVisitor<Arc> scc_visitor(&fstscc, nullptr, nullptr, &fstprops);
DfsVisit(*fst, &scc_visitor);
for (StateIterator<Fst<Arc>> siter(*fst); !siter.Done(); siter.Next()) {
const auto s = siter.Value();
for (ArcIterator<Fst<Arc>> aiter(*fst, s); !aiter.Done(); aiter.Next()) {
const auto &arc = aiter.Value();
auto it = nonterminal_hash_.find(arc.olabel);
if (it == nonterminal_hash_.end() || depscc_[it->second] != depscc) {
continue; // Skips if a terminal or a non-terminal not in SCC.
}
const bool arc_in_cycle = fstscc[s] == fstscc[arc.nextstate];
// Left linear iff all non-terminals are initial.
if (s != fst->Start() || arc_in_cycle) {
depsccprops_[depscc] &= ~kReplaceSCCLeftLinear;
}
// Right linear iff all non-terminals are final.
if (fst->Final(arc.nextstate) == Weight::Zero() || arc_in_cycle) {
depsccprops_[depscc] &= ~kReplaceSCCRightLinear;
}
}
}
}
}
} // namespace fst
#endif // FST_REPLACE_UTIL_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/compact/compact16_unweighted_acceptor-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/fst.h>
#include <fst/compact-fst.h>
namespace fst {
static FstRegisterer<
CompactUnweightedAcceptorFst<StdArc, uint16>>
CompactUnweightedAcceptorFst_StdArc_uint16_registerer;
static FstRegisterer<
CompactUnweightedAcceptorFst<LogArc, uint16>>
CompactUnweightedAcceptorFst_LogArc_uint16_registerer;
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/visit.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Queue-dependent visitation of finite-state transducers. See also dfs-visit.h.
#ifndef FST_VISIT_H_
#define FST_VISIT_H_
#include <fst/arcfilter.h>
#include <fst/mutable-fst.h>
namespace fst {
// Visitor Interface: class determining actions taken during a visit. If any of
// the boolean member functions return false, the visit is aborted by first
// calling FinishState() on all unfinished (grey) states and then calling
// FinishVisit().
//
// Note this is more general than the visitor interface in dfs-visit.h but lacks
// some DFS-specific behavior.
//
// template <class Arc>
// class Visitor {
// public:
// using StateId = typename Arc::StateId;
//
// Visitor(T *return_data);
//
// // Invoked before visit.
// void InitVisit(const Fst<Arc> &fst);
//
// // Invoked when state discovered (2nd arg is visitation root).
// bool InitState(StateId s, StateId root);
//
// // Invoked when arc to white/undiscovered state examined.
// bool WhiteArc(StateId s, const Arc &arc);
//
// // Invoked when arc to grey/unfinished state examined.
// bool GreyArc(StateId s, const Arc &arc);
//
// // Invoked when arc to black/finished state examined.
// bool BlackArc(StateId s, const Arc &arc);
//
// // Invoked when state finished.
// void FinishState(StateId s);
//
// // Invoked after visit.
// void FinishVisit();
// };
// Performs queue-dependent visitation. Visitor class argument determines
// actions and contains any return data. ArcFilter determines arcs that are
// considered. If 'access_only' is true, performs visitation only to states
// accessible from the initial state.
template <class FST, class Visitor, class Queue, class ArcFilter>
void Visit(const FST &fst, Visitor *visitor, Queue *queue, ArcFilter filter,
bool access_only = false) {
using Arc = typename FST::Arc;
using StateId = typename Arc::StateId;
visitor->InitVisit(fst);
const auto start = fst.Start();
if (start == kNoStateId) {
visitor->FinishVisit();
return;
}
// An FST's state's visit color.
static constexpr uint8_t kWhiteState = 0x01; // Undiscovered.
static constexpr uint8_t kGreyState = 0x02; // Discovered & unfinished.
static constexpr uint8_t kBlackState = 0x04; // Finished.
// We destroy an iterator as soon as possible and mark it so.
static constexpr uint8_t kArcIterDone = 0x08;
std::vector<uint8_t> state_status;
std::vector<ArcIterator<FST> *> arc_iterator;
MemoryPool<ArcIterator<FST>> aiter_pool;
StateId nstates = start + 1; // Number of known states in general case.
bool expanded = false;
if (fst.Properties(kExpanded, false)) { // Tests if expanded, then uses
nstates = CountStates(fst); // ExpandedFst::NumStates().
expanded = true;
}
state_status.resize(nstates, kWhiteState);
arc_iterator.resize(nstates);
StateIterator<Fst<Arc>> siter(fst);
// Continues visit while true.
bool visit = true;
// Iterates over trees in visit forest.
for (auto root = start; visit && root < nstates;) {
visit = visitor->InitState(root, root);
state_status[root] = kGreyState;
queue->Enqueue(root);
while (!queue->Empty()) {
auto state = queue->Head();
if (state >= state_status.size()) {
nstates = state + 1;
state_status.resize(nstates, kWhiteState);
arc_iterator.resize(nstates);
}
// Creates arc iterator if needed.
if (!arc_iterator[state] && !(state_status[state] & kArcIterDone) &&
visit) {
arc_iterator[state] = new (&aiter_pool) ArcIterator<FST>(fst, state);
}
// Deletes arc iterator if done.
auto *aiter = arc_iterator[state];
if ((aiter && aiter->Done()) || !visit) {
Destroy(aiter, &aiter_pool);
arc_iterator[state] = nullptr;
state_status[state] |= kArcIterDone;
}
// Dequeues state and marks black if done.
if (state_status[state] & kArcIterDone) {
queue->Dequeue();
visitor->FinishState(state);
state_status[state] = kBlackState;
continue;
}
const auto &arc = aiter->Value();
if (arc.nextstate >= state_status.size()) {
nstates = arc.nextstate + 1;
state_status.resize(nstates, kWhiteState);
arc_iterator.resize(nstates);
}
// Visits respective arc types.
if (filter(arc)) {
// Enqueues destination state and marks grey if white.
if (state_status[arc.nextstate] == kWhiteState) {
visit = visitor->WhiteArc(state, arc);
if (!visit) continue;
visit = visitor->InitState(arc.nextstate, root);
state_status[arc.nextstate] = kGreyState;
queue->Enqueue(arc.nextstate);
} else if (state_status[arc.nextstate] == kBlackState) {
visit = visitor->BlackArc(state, arc);
} else {
visit = visitor->GreyArc(state, arc);
}
}
aiter->Next();
// Destroys an iterator ASAP for efficiency.
if (aiter->Done()) {
Destroy(aiter, &aiter_pool);
arc_iterator[state] = nullptr;
state_status[state] |= kArcIterDone;
}
}
if (access_only) break;
// Finds next tree root.
for (root = (root == start) ? 0 : root + 1;
root < nstates && state_status[root] != kWhiteState; ++root) {
}
// Check for a state beyond the largest known state.
if (!expanded && root == nstates) {
for (; !siter.Done(); siter.Next()) {
if (siter.Value() == nstates) {
++nstates;
state_status.push_back(kWhiteState);
arc_iterator.push_back(nullptr);
break;
}
}
}
}
visitor->FinishVisit();
}
template <class Arc, class Visitor, class Queue>
inline void Visit(const Fst<Arc> &fst, Visitor *visitor, Queue *queue) {
Visit(fst, visitor, queue, AnyArcFilter<Arc>());
}
// Copies input FST to mutable FST following queue order.
template <class A>
class CopyVisitor {
public:
using Arc = A;
using StateId = typename Arc::StateId;
explicit CopyVisitor(MutableFst<Arc> *ofst) : ifst_(nullptr), ofst_(ofst) {}
void InitVisit(const Fst<A> &ifst) {
ifst_ = &ifst;
ofst_->DeleteStates();
ofst_->SetStart(ifst_->Start());
}
bool InitState(StateId state, StateId) {
while (ofst_->NumStates() <= state) ofst_->AddState();
return true;
}
bool WhiteArc(StateId state, const Arc &arc) {
ofst_->AddArc(state, arc);
return true;
}
bool GreyArc(StateId state, const Arc &arc) {
ofst_->AddArc(state, arc);
return true;
}
bool BlackArc(StateId state, const Arc &arc) {
ofst_->AddArc(state, arc);
return true;
}
void FinishState(StateId state) {
ofst_->SetFinal(state, ifst_->Final(state));
}
void FinishVisit() {}
private:
const Fst<Arc> *ifst_;
MutableFst<Arc> *ofst_;
};
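// Example (illustrative sketch): copying `ifst` in breadth-first order using a
// FIFO queue (FifoQueue is assumed to come from <fst/queue.h>):
//
//   VectorFst<StdArc> ofst;
//   CopyVisitor<StdArc> copy_visitor(&ofst);
//   FifoQueue<StdArc::StateId> queue;
//   Visit(ifst, &copy_visitor, &queue);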
// Visits input FST up to a state limit following queue order.
template <class A>
class PartialVisitor {
public:
using Arc = A;
using StateId = typename Arc::StateId;
explicit PartialVisitor(StateId maxvisit)
: fst_(nullptr), maxvisit_(maxvisit) {}
void InitVisit(const Fst<A> &ifst) {
fst_ = &ifst;
ninit_ = 0;
nfinish_ = 0;
}
bool InitState(StateId state, StateId root) {
++ninit_;
return ninit_ <= maxvisit_;
}
bool WhiteArc(StateId state, const Arc &arc) { return true; }
bool GreyArc(StateId state, const Arc &arc) { return true; }
bool BlackArc(StateId state, const Arc &arc) { return true; }
void FinishState(StateId state) {
fst_->Final(state); // Visits super-final arc.
++nfinish_;
}
void FinishVisit() {}
StateId NumInitialized() { return ninit_; }
StateId NumFinished() { return nfinish_; }
private:
const Fst<Arc> *fst_;
StateId maxvisit_;
StateId ninit_;
StateId nfinish_;
};
// Copies input FST to mutable FST up to a state limit following queue order.
template <class A>
class PartialCopyVisitor : public CopyVisitor<A> {
public:
using Arc = A;
using StateId = typename Arc::StateId;
using CopyVisitor<A>::WhiteArc;
PartialCopyVisitor(MutableFst<Arc> *ofst, StateId maxvisit,
bool copy_grey = true, bool copy_black = true)
: CopyVisitor<A>(ofst), maxvisit_(maxvisit),
copy_grey_(copy_grey), copy_black_(copy_black) {}
void InitVisit(const Fst<A> &ifst) {
CopyVisitor<A>::InitVisit(ifst);
ninit_ = 0;
nfinish_ = 0;
}
bool InitState(StateId state, StateId root) {
CopyVisitor<A>::InitState(state, root);
++ninit_;
return ninit_ <= maxvisit_;
}
bool GreyArc(StateId state, const Arc &arc) {
if (copy_grey_) return CopyVisitor<A>::GreyArc(state, arc);
return true;
}
bool BlackArc(StateId state, const Arc &arc) {
if (copy_black_) return CopyVisitor<A>::BlackArc(state, arc);
return true;
}
void FinishState(StateId state) {
CopyVisitor<A>::FinishState(state);
++nfinish_;
}
void FinishVisit() {}
StateId NumInitialized() { return ninit_; }
StateId NumFinished() { return nfinish_; }
private:
StateId maxvisit_;
StateId ninit_;
StateId nfinish_;
const bool copy_grey_;
const bool copy_black_;
};
} // namespace fst
#endif // FST_VISIT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/util/usage.cc | #include "util/usage.hh"
#include "util/exception.hh"
#include <fstream>
#include <ostream>
#include <sstream>
#include <set>
#include <string>
#include <cstring>
#include <cctype>
#include <ctime>
#if defined(_WIN32) || defined(_WIN64)
// This code lifted from physmem.c in gnulib. See the copyright statement
// below.
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
/* MEMORYSTATUSEX is missing from older windows headers, so define
a local replacement. */
typedef struct
{
DWORD dwLength;
DWORD dwMemoryLoad;
DWORDLONG ullTotalPhys;
DWORDLONG ullAvailPhys;
DWORDLONG ullTotalPageFile;
DWORDLONG ullAvailPageFile;
DWORDLONG ullTotalVirtual;
DWORDLONG ullAvailVirtual;
DWORDLONG ullAvailExtendedVirtual;
} lMEMORYSTATUSEX;
// Is this really supposed to be defined like this?
typedef int WINBOOL;
typedef WINBOOL (WINAPI *PFN_MS_EX) (lMEMORYSTATUSEX*);
#else
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#endif
#if defined(__MACH__) || defined(__FreeBSD__) || defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/task.h>
#include <mach/mach.h>
#endif
namespace util {
namespace {
#if defined(__MACH__)
typedef struct timeval Wall;
Wall GetWall() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv;
}
#elif defined(_WIN32) || defined(_WIN64)
typedef time_t Wall;
Wall GetWall() {
return time(NULL);
}
#else
typedef struct timespec Wall;
Wall GetWall() {
Wall ret;
UTIL_THROW_IF(-1 == clock_gettime(CLOCK_MONOTONIC, &ret), ErrnoException, "Could not get wall time");
return ret;
}
#endif
// gcc possible-unused function flags
#ifdef __GNUC__
double Subtract(time_t first, time_t second) __attribute__ ((unused));
double DoubleSec(time_t tv) __attribute__ ((unused));
#if !defined(_WIN32) && !defined(_WIN64)
double Subtract(const struct timeval &first, const struct timeval &second) __attribute__ ((unused));
double Subtract(const struct timespec &first, const struct timespec &second) __attribute__ ((unused));
double DoubleSec(const struct timeval &tv) __attribute__ ((unused));
double DoubleSec(const struct timespec &tv) __attribute__ ((unused));
#endif
#endif
// Some of these functions are only used on some platforms.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
// These all assume first > second
double Subtract(time_t first, time_t second) {
return difftime(first, second);
}
double DoubleSec(time_t tv) {
return static_cast<double>(tv);
}
#if !defined(_WIN32) && !defined(_WIN64)
double Subtract(const struct timeval &first, const struct timeval &second) {
return static_cast<double>(first.tv_sec - second.tv_sec) + static_cast<double>(first.tv_usec - second.tv_usec) / 1000000.0;
}
double Subtract(const struct timespec &first, const struct timespec &second) {
return static_cast<double>(first.tv_sec - second.tv_sec) + static_cast<double>(first.tv_nsec - second.tv_nsec) / 1000000000.0;
}
double DoubleSec(const struct timeval &tv) {
return static_cast<double>(tv.tv_sec) + (static_cast<double>(tv.tv_usec) / 1000000.0);
}
double DoubleSec(const struct timespec &tv) {
return static_cast<double>(tv.tv_sec) + (static_cast<double>(tv.tv_nsec) / 1000000000.0);
}
#endif
#ifdef __clang__
#pragma clang diagnostic pop
#endif
class RecordStart {
public:
RecordStart() {
started_ = GetWall();
}
const Wall &Started() const {
return started_;
}
private:
Wall started_;
};
const RecordStart kRecordStart;
const char *SkipSpaces(const char *at) {
for (; *at == ' ' || *at == '\t'; ++at) {}
return at;
}
} // namespace
double WallTime() {
return Subtract(GetWall(), kRecordStart.Started());
}
double CPUTime() {
#if defined(_WIN32) || defined(_WIN64)
return 0.0;
#elif defined(__MACH__) || defined(__FreeBSD__) || defined(__APPLE__)
struct rusage usage;
UTIL_THROW_IF(getrusage(RUSAGE_SELF, &usage), ErrnoException, "getrusage failed");
return DoubleSec(usage.ru_utime) + DoubleSec(usage.ru_stime);
#else
struct timespec usage;
UTIL_THROW_IF(clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &usage), ErrnoException, "clock_gettime failed?!");
return DoubleSec(usage);
#endif
}
double ThreadTime() {
#if defined(_WIN32) || defined(_WIN64)
// Output parameters for querying thread CPU usage:
FILETIME sys_time, user_time;
// Unused, but apparently need to be passed:
FILETIME c_time, e_time;
HANDLE this_thread = GetCurrentThread();
UTIL_THROW_IF(!GetThreadTimes(this_thread, &c_time, &e_time, &sys_time, &user_time), WindowsException, "GetThreadTime");
// Convert LPFILETIME to 64-bit number, and from there to double.
ULARGE_INTEGER sys_ticks, user_ticks;
sys_ticks.LowPart = sys_time.dwLowDateTime;
sys_ticks.HighPart = sys_time.dwHighDateTime;
user_ticks.LowPart = user_time.dwLowDateTime;
user_ticks.HighPart = user_time.dwHighDateTime;
const double ticks = double(sys_ticks.QuadPart + user_ticks.QuadPart);
// GetThreadTimes() reports in units of 100 nanoseconds, i.e. ten-millionths
// of a second.
return ticks / (10 * 1000 * 1000);
#elif defined(__MACH__) || defined(__FreeBSD__) || defined(__APPLE__)
struct task_basic_info t_info;
mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
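  // Thread-specific CPU time is not reported on this platform; the task_info
  // result is currently unused.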
return 0.0;
#else
struct timespec usage;
UTIL_THROW_IF(clock_gettime(CLOCK_THREAD_CPUTIME_ID, &usage), ErrnoException, "clock_gettime failed?!");
return DoubleSec(usage);
#endif
}
uint64_t RSSMax() {
#if defined(_WIN32) || defined(_WIN64)
return 0;
#else
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage))
return 0;
return static_cast<uint64_t>(usage.ru_maxrss) * 1024;
#endif
}
void PrintUsage(std::ostream &out) {
#if !defined(_WIN32) && !defined(_WIN64)
// Linux doesn't set memory usage in getrusage :-(
std::set<std::string> headers;
headers.insert("VmPeak:");
headers.insert("VmRSS:");
headers.insert("Name:");
std::ifstream status("/proc/self/status", std::ios::in);
std::string header, value;
while ((status >> header) && getline(status, value)) {
if (headers.find(header) != headers.end()) {
out << header << SkipSpaces(value.c_str()) << '\t';
}
}
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage)) {
perror("getrusage");
return;
}
out << "RSSMax:" << usage.ru_maxrss << " kB" << '\t';
out << "user:" << DoubleSec(usage.ru_utime) << "\tsys:" << DoubleSec(usage.ru_stime) << '\t';
out << "CPU:" << CPUTime() << '\t';
#endif
out << "real:" << WallTime() << '\n';
}
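// A minimal usage sketch (illustrative only): measure a block of work with
// the timers above and dump resource usage at the end.
//
//   double wall0 = util::WallTime(), cpu0 = util::CPUTime();
//   // ... do work ...
//   std::cerr << "wall: " << (util::WallTime() - wall0)
//             << " cpu: " << (util::CPUTime() - cpu0) << '\n';
//   util::PrintUsage(std::cerr);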
/* Adapted from physmem.c in gnulib 831b84c59ef413c57a36b67344467d66a8a2ba70 */
/* Calculate the size of physical memory.
Copyright (C) 2000-2001, 2003, 2005-2006, 2009-2013 Free Software
Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* Written by Paul Eggert. */
uint64_t GuessPhysicalMemory() {
#if defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGESIZE);
if (pages != -1 && page_size != -1)
return static_cast<uint64_t>(pages) * static_cast<uint64_t>(page_size);
}
#endif
#ifdef HW_PHYSMEM
{ /* This works on *bsd and darwin. */
unsigned int physmem;
size_t len = sizeof physmem;
static int mib[2] = { CTL_HW, HW_PHYSMEM };
if (sysctl (mib, sizeof(mib) / sizeof(mib[0]), &physmem, &len, NULL, 0) == 0
&& len == sizeof (physmem))
return static_cast<uint64_t>(physmem);
}
#endif
#if defined(_WIN32) || defined(_WIN64)
{ /* This works on Windows. */
PFN_MS_EX pfnex;
HMODULE h = GetModuleHandle (TEXT("kernel32.dll"));
if (!h)
return 0;
/* Use GlobalMemoryStatusEx if available. */
if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx")))
{
lMEMORYSTATUSEX lms_ex;
lms_ex.dwLength = sizeof lms_ex;
if (!pfnex (&lms_ex))
return 0;
return lms_ex.ullTotalPhys;
}
/* Fall back to GlobalMemoryStatus, which is always available
but returns wrong results for physical memory > 4GB. */
else
{
MEMORYSTATUS ms;
GlobalMemoryStatus (&ms);
return ms.dwTotalPhys;
}
}
#endif
return 0;
}
namespace {
class SizeParseError : public Exception {
public:
explicit SizeParseError(const std::string &str) throw() {
*this << "Failed to parse " << str << " into a memory size ";
}
};
template <class Num> uint64_t ParseNum(const std::string &arg) {
std::stringstream stream(arg);
Num value;
stream >> value;
UTIL_THROW_IF_ARG(!stream, SizeParseError, (arg), "for the leading number.");
std::string after;
stream >> after;
UTIL_THROW_IF_ARG(after.size() > 1, SizeParseError, (arg), "because there is more than one character after the number.");
std::string throwaway;
UTIL_THROW_IF_ARG(stream >> throwaway, SizeParseError, (arg), "because there was more cruft " << throwaway << " after the number.");
// Default to kilobytes when no unit suffix is given.
if (after.empty()) after = "K";
if (after == "%") {
uint64_t mem = GuessPhysicalMemory();
UTIL_THROW_IF_ARG(!mem, SizeParseError, (arg), "because % was specified but the physical memory size could not be determined.");
return static_cast<uint64_t>(static_cast<double>(value) * static_cast<double>(mem) / 100.0);
}
if (after == "k") after = "K";
std::string units("bKMGTPEZY");
std::string::size_type index = units.find(after[0]);
UTIL_THROW_IF_ARG(index == std::string::npos, SizeParseError, (arg), "the allowed suffixes are " << units << "%.");
for (std::string::size_type i = 0; i < index; ++i) {
value *= 1024;
}
return static_cast<uint64_t>(value);
}
} // namespace
uint64_t ParseSize(const std::string &arg) {
return arg.find('.') == std::string::npos ? ParseNum<uint64_t>(arg) : ParseNum<double>(arg);
}
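// Usage sketch (values illustrative): ParseSize accepts a number with an
// optional unit suffix from "bKMGTPEZY" (bare numbers default to K) or a
// percentage of detected physical memory, e.g.:
//
//   util::ParseSize("16");   // 16 KiB = 16384 bytes
//   util::ParseSize("1.5G"); // 1.5 GiB
//   util::ParseSize("50%");  // half of GuessPhysicalMemory()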
} // namespace util
| 0 |
coqui_public_repos/Trainer | coqui_public_repos/Trainer/tests/test_train_mnist.py | import os
import torch
from tests.utils.mnist import MnistModel, MnistModelConfig
from trainer import Trainer, TrainerArgs
is_cuda = torch.cuda.is_available()
def test_train_mnist():
model = MnistModel()
trainer = Trainer(
TrainerArgs(), MnistModelConfig(), model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None
)
trainer.fit()
loss1 = trainer.keep_avg_train["avg_loss"]
trainer.fit()
loss2 = trainer.keep_avg_train["avg_loss"]
assert loss1 > loss2
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/script/arcsort.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/arcsort.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
void ArcSort(MutableFstClass *fst, ArcSortType sort_type) {
ArcSortArgs args(fst, sort_type);
Apply<Operation<ArcSortArgs>>("ArcSort", fst->ArcType(), &args);
}
REGISTER_FST_OPERATION(ArcSort, StdArc, ArcSortArgs);
REGISTER_FST_OPERATION(ArcSort, LogArc, ArcSortArgs);
REGISTER_FST_OPERATION(ArcSort, Log64Arc, ArcSortArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT-models/luganda/itml | coqui_public_repos/STT-models/luganda/itml/v0.1.1/alphabet.txt |
'
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
r
s
t
u
v
w
x
y
z
| 0 |
coqui_public_repos/snakepit | coqui_public_repos/snakepit/bin/clean-service.sh | #!/usr/bin/env bash
lxc delete --force snakepit
lxc image delete snakepit
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstreweight.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_bool(to_final, false, "Push/reweight to final (vs. to initial) states");
int fstreweight_main(int argc, char **argv);
int main(int argc, char **argv) { return fstreweight_main(argc, argv); }
| 0 |
coqui_public_repos/STT-examples | coqui_public_repos/STT-examples/android_mic_streaming/gradle.properties | # Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx1536m
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
# AndroidX package structure to make it clearer which packages are bundled with the
# Android operating system, and which are packaged with your app's APK
# https://developer.android.com/topic/libraries/support-library/androidx-rn
android.useAndroidX=true
# Automatically convert third-party libraries to use AndroidX
android.enableJetifier=true
# Kotlin code style for this project: "official" or "obsolete":
kotlin.code.style=official
| 0 |
coqui_public_repos/snakepit | coqui_public_repos/snakepit/src/service.js | const cluster = require('cluster')
const Parallel = require('async-parallel')
const cpus = require('os').cpus().length
const log = require('./utils/logger.js')
const config = require('./config.js')
const models = require('./models')
const pitRunner = require('./pitRunner.js')
const scheduler = require('./scheduler.js')
async function startup () {
await models.sequelize.sync()
await Parallel.each(models.all, async model => await (model.startup || Function)())
await pitRunner.startup()
await scheduler.startup()
}
if (cluster.isMaster) {
cluster.on('exit', (deadWorker, code, signal) => {
if (code === 100) {
process.exit(100) // Preventing fork-loop on startup problems
}
let worker = cluster.fork()
log.error('Worker ' + deadWorker.process.pid + ' died.')
log.info('Worker ' + worker.process.pid + ' born.')
})
startup().then(() => {
for (let i = 0; i < cpus; i++) {
cluster.fork()
}
log.info('Snakepit daemon started')
}).catch(ex => {
log.error('Snakepit startup problem:', ex)
process.exit(1)
})
} else {
try {
const ws = require('ws')
const http = require('http')
const morgan = require('morgan')
const express = require('express')
const bodyParser = require('body-parser')
let app = express()
app.use(bodyParser.json({ limit: '50mb' }))
app.use(morgan('combined', {
skip: (req, res) => res.statusCode < 400 && !config.debugHttp
}))
app.use(require('./routes'))
app.use((err, req, res, next) => {
let message = err.message || 'Internal error'
let code = err.code || 500
log.error('ERROR', code, message)
if (err.stack) {
log.error(err.stack)
}
res.status(code).send({ message: message })
})
const wss = new ws.Server({ noServer: true })
let server = http.createServer(app)
server.on('upgrade', (req, socket, header) => {
// Wrap the raw socket in a ServerResponse so upgrade requests can be
// routed through the regular Express middleware stack.
let res = new http.ServerResponse(req)
// Buffer.alloc replaces the deprecated `new Buffer(size)` constructor.
let headerClone = Buffer.alloc(header.length)
header.copy(headerClone)
res.assignSocket(socket)
res.on('finish', () => res.socket.destroy())
// Route handlers call res.openSocket(cb) to finish the WebSocket handshake.
res.openSocket = cb => wss.handleUpgrade(req, socket, headerClone, cb)
return app(req, res)
})
server.listen(config.port, config.interface)
log.info('Snakepit service running on ' + config.interface + ':' + config.port)
} catch (ex) {
log.error('Failure during startup: ', ex, ex.stack)
process.exit(100)
}
}
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/android-cache-x86_64-android-29.yml | build:
template_file: generic_tc_caching-linux-opt-base.tyml
system_setup:
>
${java.packages_xenial.apt}
cache:
artifact_url: ${system.android_cache.x86_64.android_29.url}
artifact_namespace: ${system.android_cache.x86_64.android_29.namespace}
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/android_cache-build.sh x86_64 android-29"
package: "taskcluster/android_cache-package.sh"
workerType: "${docker.smallTask}"
metadata:
name: "Builds Android cache x86_64 / android-29"
description: "Setup an Android SDK / emulator cache for Android / x86_64 android-29"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/compact/compact16_string-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/fst.h>
#include <fst/compact-fst.h>
namespace fst {
static FstRegisterer<CompactStringFst<StdArc, uint16>>
CompactStringFst_StdArc_uint16_registerer;
static FstRegisterer<CompactStringFst<LogArc, uint16>>
CompactStringFst_LogArc_uint16_registerer;
} // namespace fst
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/scriptworker-task-npm.yml | build:
template_file: simple-task.tyml
dependencies:
# Make sure builds are ready
- "node-package-gpu"
- "node-package-cpu"
- "node-package-tflite"
allowed:
- "tag"
ref_match: "refs/tags/"
upload_targets:
- "npm"
artifacts_deps:
python: []
cpp: []
java_aar: []
javascript:
# GPU package
- "node-package-gpu"
# CPU package with all archs
- "node-package-cpu"
# tflite package for non-default tflite archs
- "node-package-tflite"
nuget: []
metadata:
name: "DeepSpeech NPM Packages"
description: "Trigger Uploading of DeepSpeech Packages to NPM registry"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/script/weight-class.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Represents a generic weight in an FST; that is, represents a specific type
// of weight underneath while hiding that type from a client.
#ifndef FST_SCRIPT_WEIGHT_CLASS_H_
#define FST_SCRIPT_WEIGHT_CLASS_H_
#include <memory>
#include <ostream>
#include <string>
#include <fst/arc.h>
#include <fst/generic-register.h>
#include <fst/util.h>
#include <fst/weight.h>
namespace fst {
namespace script {
class WeightImplBase {
public:
virtual WeightImplBase *Copy() const = 0;
virtual void Print(std::ostream *o) const = 0;
virtual const string &Type() const = 0;
virtual string ToString() const = 0;
virtual bool operator==(const WeightImplBase &other) const = 0;
virtual bool operator!=(const WeightImplBase &other) const = 0;
virtual WeightImplBase &PlusEq(const WeightImplBase &other) = 0;
virtual WeightImplBase &TimesEq(const WeightImplBase &other) = 0;
virtual WeightImplBase &DivideEq(const WeightImplBase &other) = 0;
virtual WeightImplBase &PowerEq(size_t n) = 0;
virtual ~WeightImplBase() {}
};
template <class W>
class WeightClassImpl : public WeightImplBase {
public:
explicit WeightClassImpl(const W &weight) : weight_(weight) {}
WeightClassImpl<W> *Copy() const final {
return new WeightClassImpl<W>(weight_);
}
const string &Type() const final { return W::Type(); }
void Print(std::ostream *ostrm) const final { *ostrm << weight_; }
string ToString() const final {
string str;
WeightToStr(weight_, &str);
return str;
}
bool operator==(const WeightImplBase &other) const final {
const auto *typed_other = static_cast<const WeightClassImpl<W> *>(&other);
return weight_ == typed_other->weight_;
}
bool operator!=(const WeightImplBase &other) const final {
return !(*this == other);
}
WeightClassImpl<W> &PlusEq(const WeightImplBase &other) final {
const auto *typed_other = static_cast<const WeightClassImpl<W> *>(&other);
weight_ = Plus(weight_, typed_other->weight_);
return *this;
}
WeightClassImpl<W> &TimesEq(const WeightImplBase &other) final {
const auto *typed_other = static_cast<const WeightClassImpl<W> *>(&other);
weight_ = Times(weight_, typed_other->weight_);
return *this;
}
WeightClassImpl<W> &DivideEq(const WeightImplBase &other) final {
const auto *typed_other = static_cast<const WeightClassImpl<W> *>(&other);
weight_ = Divide(weight_, typed_other->weight_);
return *this;
}
WeightClassImpl<W> &PowerEq(size_t n) final {
weight_ = Power<W>(weight_, n);
return *this;
}
W *GetImpl() { return &weight_; }
private:
W weight_;
};
class WeightClass {
public:
WeightClass() = default;
template <class W>
explicit WeightClass(const W &weight)
: impl_(new WeightClassImpl<W>(weight)) {}
template <class W>
explicit WeightClass(const WeightClassImpl<W> &impl)
: impl_(new WeightClassImpl<W>(impl)) {}
WeightClass(const string &weight_type, const string &weight_str);
WeightClass(const WeightClass &other)
: impl_(other.impl_ ? other.impl_->Copy() : nullptr) {}
WeightClass &operator=(const WeightClass &other) {
impl_.reset(other.impl_ ? other.impl_->Copy() : nullptr);
return *this;
}
static constexpr const char *__ZERO__ = "__ZERO__"; // NOLINT
static WeightClass Zero(const string &weight_type);
static constexpr const char *__ONE__ = "__ONE__"; // NOLINT
static WeightClass One(const string &weight_type);
static constexpr const char *__NOWEIGHT__ = "__NOWEIGHT__"; // NOLINT
static WeightClass NoWeight(const string &weight_type);
template <class W>
const W *GetWeight() const {
if (W::Type() != impl_->Type()) {
return nullptr;
} else {
auto *typed_impl = static_cast<WeightClassImpl<W> *>(impl_.get());
return typed_impl->GetImpl();
}
}
string ToString() const { return (impl_) ? impl_->ToString() : "none"; }
const string &Type() const {
if (impl_) return impl_->Type();
static const string *const no_type = new string("none");
return *no_type;
}
bool WeightTypesMatch(const WeightClass &other, const string &op_name) const;
friend bool operator==(const WeightClass &lhs, const WeightClass &rhs);
friend WeightClass Plus(const WeightClass &lhs, const WeightClass &rhs);
friend WeightClass Times(const WeightClass &lhs, const WeightClass &rhs);
friend WeightClass Divide(const WeightClass &lhs, const WeightClass &rhs);
friend WeightClass Power(const WeightClass &w, size_t n);
private:
const WeightImplBase *GetImpl() const { return impl_.get(); }
WeightImplBase *GetImpl() { return impl_.get(); }
std::unique_ptr<WeightImplBase> impl_;
friend std::ostream &operator<<(std::ostream &o, const WeightClass &c);
};
bool operator==(const WeightClass &lhs, const WeightClass &rhs);
bool operator!=(const WeightClass &lhs, const WeightClass &rhs);
WeightClass Plus(const WeightClass &lhs, const WeightClass &rhs);
WeightClass Times(const WeightClass &lhs, const WeightClass &rhs);
WeightClass Divide(const WeightClass &lhs, const WeightClass &rhs);
WeightClass Power(const WeightClass &w, size_t n);
std::ostream &operator<<(std::ostream &o, const WeightClass &c);
// Registration for generic weight types.
using StrToWeightImplBaseT = WeightImplBase *(*)(const string &str,
const string &src,
size_t nline);
template <class W>
WeightImplBase *StrToWeightImplBase(const string &str, const string &src,
size_t nline) {
if (str == WeightClass::__ZERO__)
return new WeightClassImpl<W>(W::Zero());
else if (str == WeightClass::__ONE__)
return new WeightClassImpl<W>(W::One());
else if (str == WeightClass::__NOWEIGHT__)
return new WeightClassImpl<W>(W::NoWeight());
return new WeightClassImpl<W>(StrToWeight<W>(str, src, nline));
}
class WeightClassRegister : public GenericRegister<string, StrToWeightImplBaseT,
WeightClassRegister> {
protected:
string ConvertKeyToSoFilename(const string &key) const final {
string legal_type(key);
ConvertToLegalCSymbol(&legal_type);
return legal_type + ".so";
}
};
using WeightClassRegisterer = GenericRegisterer<WeightClassRegister>;
// Internal version; needs to be called by wrapper in order for macro args to
// expand.
#define REGISTER_FST_WEIGHT__(Weight, line) \
static WeightClassRegisterer weight_registerer##_##line( \
Weight::Type(), StrToWeightImplBase<Weight>)
// This layer is where __FILE__ and __LINE__ are expanded.
#define REGISTER_FST_WEIGHT_EXPANDER(Weight, line) \
REGISTER_FST_WEIGHT__(Weight, line)
// Macro for registering new weight types; clients call this.
#define REGISTER_FST_WEIGHT(Weight) \
REGISTER_FST_WEIGHT_EXPANDER(Weight, __LINE__)
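// Hedged usage sketch: REGISTER_FST_WEIGHT makes a weight type constructible
// by name through WeightClass. MyWeight below is a hypothetical type; any
// class satisfying the weight interface works.
//
//   REGISTER_FST_WEIGHT(MyWeight);
//
//   // Clients can then manipulate weights generically:
//   WeightClass w("tropical", "3.5");
//   WeightClass one = WeightClass::One("tropical");
//   WeightClass sum = Plus(w, one);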
} // namespace script
} // namespace fst
#endif // FST_SCRIPT_WEIGHT_CLASS_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/openfst.targets | <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003" InitialTargets="_SelectOnlySources">
<PropertyGroup>
<!-- Keep a separate tlog in each multi-bin subproject
(in fact, in all projects; it does not hurt). Needs a trailing '\'. -->
<TLogLocation>$(IntDir)$(ProjectName).tlog\</TLogLocation>
<!-- Intentionally sharing, handled carefully. Quench the warning. -->
<IgnoreWarnIntDirSharingDetected Condition=" '$(MultiBin)' == 'true' ">true</IgnoreWarnIntDirSharingDetected>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ItemDefinitionGroup>
<ProjectReference>
<!-- Do not propagate to dependent projects the properties that
we set on recursive invocation. -->
<GlobalPropertiesToRemove>ProjectName;OnlySources</GlobalPropertiesToRemove>
</ProjectReference>
</ItemDefinitionGroup>
<!-- In an inner build only, shrink down the ClCompile collection to those
passed in the OnlySources property. No harm doing this as early as
possible, so register this as InitialTarget. -->
<Target Name="_SelectOnlySources" Condition=" '$(MultiBin)' == 'true' and '$(DesignTimeBuild)' != 'true' ">
<ItemGroup Condition=" '$(OnlySources)' != '' ">
<_OnlySources Include="$(OnlySources)" />
<ClCompile Remove="@(ClCompile)" Condition="'%(Identity)' != '@(_OnlySources)'" />
</ItemGroup>
</Target>
<!-- Override Build, Clean, Rebuild and certain IDE targets in a multi-bin outer build. -->
<Import Project="openfst-multibin.targets"
Condition=" '$(MultiBin)' == 'true' and '$(DesignTimeBuild)' != 'true' and '$(OnlySources)' == '' "/>
</Project> | 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstisomorphic.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
#include <fst/weight.h>
DEFINE_double(delta, fst::kDelta, "Comparison/quantization delta");
int fstisomorphic_main(int argc, char **argv);
int main(int argc, char **argv) { return fstisomorphic_main(argc, argv); }
| 0 |
coqui_public_repos/TTS/TTS/tts/layers | coqui_public_repos/TTS/TTS/tts/layers/feed_forward/decoder.py | import torch
from torch import nn
from TTS.tts.layers.generic.res_conv_bn import Conv1dBN, Conv1dBNBlock, ResidualConv1dBNBlock
from TTS.tts.layers.generic.transformer import FFTransformerBlock
from TTS.tts.layers.generic.wavenet import WNBlocks
from TTS.tts.layers.glow_tts.transformer import RelativePositionTransformer
class WaveNetDecoder(nn.Module):
"""WaveNet based decoder with a prenet and a postnet.
prenet: conv1d_1x1
postnet: 3 x [conv1d_1x1 -> relu] -> conv1d_1x1
TODO: Integrate speaker conditioning vector.
Note:
default wavenet parameters;
params = {
"num_blocks": 12,
"hidden_channels":192,
"kernel_size": 5,
"dilation_rate": 1,
"num_layers": 4,
"dropout_p": 0.05
}
Args:
in_channels (int): number of input channels.
out_channels (int): number of output channels.
hidden_channels (int): number of hidden channels for prenet and postnet.
params (dict): dictionary for residual convolutional blocks.
"""
def __init__(self, in_channels, out_channels, hidden_channels, c_in_channels, params):
super().__init__()
# prenet
self.prenet = torch.nn.Conv1d(in_channels, params["hidden_channels"], 1)
# wavenet layers
self.wn = WNBlocks(params["hidden_channels"], c_in_channels=c_in_channels, **params)
# postnet
self.postnet = [
torch.nn.Conv1d(params["hidden_channels"], hidden_channels, 1),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_channels, hidden_channels, 1),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_channels, hidden_channels, 1),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_channels, out_channels, 1),
]
self.postnet = nn.Sequential(*self.postnet)
def forward(self, x, x_mask=None, g=None):
x = self.prenet(x) * x_mask
x = self.wn(x, x_mask, g)
o = self.postnet(x) * x_mask
return o
class RelativePositionTransformerDecoder(nn.Module):
"""Decoder with Relative Positional Transformer.
Note:
Default params
params={
'hidden_channels_ffn': 128,
'num_heads': 2,
"kernel_size": 3,
"dropout_p": 0.1,
"num_layers": 8,
"rel_attn_window_size": 4,
"input_length": None
}
Args:
in_channels (int): number of input channels.
out_channels (int): number of output channels.
hidden_channels (int): number of hidden channels including Transformer layers.
params (dict): dictionary for residual convolutional blocks.
"""
def __init__(self, in_channels, out_channels, hidden_channels, params):
super().__init__()
self.prenet = Conv1dBN(in_channels, hidden_channels, 1, 1)
self.rel_pos_transformer = RelativePositionTransformer(in_channels, out_channels, hidden_channels, **params)
def forward(self, x, x_mask=None, g=None): # pylint: disable=unused-argument
o = self.prenet(x) * x_mask
o = self.rel_pos_transformer(o, x_mask)
return o
class FFTransformerDecoder(nn.Module):
"""Decoder with FeedForwardTransformer.
Default params
params={
'hidden_channels_ffn': 1024,
'num_heads': 2,
"dropout_p": 0.1,
"num_layers": 6,
}
Args:
in_channels (int): number of input channels.
out_channels (int): number of output channels.
hidden_channels (int): number of hidden channels including Transformer layers.
params (dict): dictionary for residual convolutional blocks.
"""
def __init__(self, in_channels, out_channels, params):
super().__init__()
self.transformer_block = FFTransformerBlock(in_channels, **params)
self.postnet = nn.Conv1d(in_channels, out_channels, 1)
def forward(self, x, x_mask=None, g=None): # pylint: disable=unused-argument
# TODO: handle multi-speaker
x_mask = 1 if x_mask is None else x_mask
o = self.transformer_block(x) * x_mask
o = self.postnet(o) * x_mask
return o
class ResidualConv1dBNDecoder(nn.Module):
"""Residual Convolutional Decoder as in the original Speedy Speech paper
TODO: Integrate speaker conditioning vector.
Note:
Default params
params = {
"kernel_size": 4,
"dilations": 4 * [1, 2, 4, 8] + [1],
"num_conv_blocks": 2,
"num_res_blocks": 17
}
Args:
in_channels (int): number of input channels.
out_channels (int): number of output channels.
hidden_channels (int): number of hidden channels including ResidualConv1dBNBlock layers.
params (dict): dictionary for residual convolutional blocks.
"""
def __init__(self, in_channels, out_channels, hidden_channels, params):
super().__init__()
self.res_conv_block = ResidualConv1dBNBlock(in_channels, hidden_channels, hidden_channels, **params)
self.post_conv = nn.Conv1d(hidden_channels, hidden_channels, 1)
self.postnet = nn.Sequential(
Conv1dBNBlock(
hidden_channels, hidden_channels, hidden_channels, params["kernel_size"], 1, num_conv_blocks=2
),
nn.Conv1d(hidden_channels, out_channels, 1),
)
def forward(self, x, x_mask=None, g=None): # pylint: disable=unused-argument
o = self.res_conv_block(x, x_mask)
o = self.post_conv(o) + x
return self.postnet(o) * x_mask
class Decoder(nn.Module):
"""Decodes the expanded phoneme encoding into spectrograms
Args:
out_channels (int): number of output channels.
in_hidden_channels (int): input and hidden channels. Model keeps the input channels for the intermediate layers.
decoder_type (str): decoder layer type. One of 'relative_position_transformer', 'residual_conv_bn', 'wavenet' or 'fftransformer'. Defaults to 'residual_conv_bn'.
decoder_params (dict): model parameters for specified decoder type.
c_in_channels (int): number of channels for conditional input.
Shapes:
- input: (B, C, T)
"""
# pylint: disable=dangerous-default-value
def __init__(
self,
out_channels,
in_hidden_channels,
decoder_type="residual_conv_bn",
decoder_params={
"kernel_size": 4,
"dilations": 4 * [1, 2, 4, 8] + [1],
"num_conv_blocks": 2,
"num_res_blocks": 17,
},
c_in_channels=0,
):
super().__init__()
if decoder_type.lower() == "relative_position_transformer":
self.decoder = RelativePositionTransformerDecoder(
in_channels=in_hidden_channels,
out_channels=out_channels,
hidden_channels=in_hidden_channels,
params=decoder_params,
)
elif decoder_type.lower() == "residual_conv_bn":
self.decoder = ResidualConv1dBNDecoder(
in_channels=in_hidden_channels,
out_channels=out_channels,
hidden_channels=in_hidden_channels,
params=decoder_params,
)
elif decoder_type.lower() == "wavenet":
self.decoder = WaveNetDecoder(
in_channels=in_hidden_channels,
out_channels=out_channels,
hidden_channels=in_hidden_channels,
c_in_channels=c_in_channels,
params=decoder_params,
)
elif decoder_type.lower() == "fftransformer":
self.decoder = FFTransformerDecoder(in_hidden_channels, out_channels, decoder_params)
else:
raise ValueError(f"[!] Unknown decoder type - {decoder_type}")
def forward(self, x, x_mask, g=None): # pylint: disable=unused-argument
"""
Args:
x: [B, C, T]
x_mask: [B, 1, T]
g: [B, C_g, 1]
"""
# TODO: implement multi-speaker
o = self.decoder(x, x_mask, g)
return o
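# Hedged usage sketch (illustrative, not executed on import): channel sizes
# are arbitrary; shapes follow the docstrings above.
#
# import torch
# decoder = Decoder(out_channels=80, in_hidden_channels=128)
# x = torch.randn(2, 128, 37) # [B, C, T]
# x_mask = torch.ones(2, 1, 37) # [B, 1, T]
# spec = decoder(x, x_mask) # [B, 80, T]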
| 0 |
coqui_public_repos/STT-examples/django_api_streaming | coqui_public_repos/STT-examples/django_api_streaming/stt_app/models.py | from django.db import models
# Create your models here.
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/tc-evaluate_tflite.sh | #!/bin/bash
set -xe
source $(dirname "$0")/tc-tests-utils.sh
extract_python_versions "$1" "pyver" "pyver_pkg" "py_unicode_type" "pyconf" "pyalias"
bitrate=$2
set_ldc_sample_filename "${bitrate}"
download_data
virtualenv_activate "${pyalias}" "deepspeech"
deepspeech_pkg_url=$(get_python_pkg_url ${pyver_pkg} ${py_unicode_type})
set -o pipefail
LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH pip install --verbose --only-binary :all: --upgrade ${deepspeech_pkg_url} | cat
pip install --upgrade pip==19.3.1 setuptools==45.0.0 wheel==0.33.6 | cat
pushd ${HOME}/DeepSpeech/ds
pip install --upgrade . | cat
popd
set +o pipefail
which deepspeech
deepspeech --version
pushd ${HOME}/DeepSpeech/ds/
python bin/import_ldc93s1.py data/smoke_test
python evaluate_tflite.py --model "${TASKCLUSTER_TMP_DIR}/${model_name_mmap}" --scorer data/smoke_test/pruned_lm.scorer --csv data/smoke_test/ldc93s1.csv
popd
virtualenv_deactivate "${pyalias}" "deepspeech"
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/product-weight.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Product weight set and associated semiring operation definitions.
#ifndef FST_PRODUCT_WEIGHT_H_
#define FST_PRODUCT_WEIGHT_H_
#include <string>
#include <utility>
#include <fst/pair-weight.h>
#include <fst/weight.h>
namespace fst {
// Product semiring: W1 * W2.
template <class W1, class W2>
class ProductWeight : public PairWeight<W1, W2> {
public:
using ReverseWeight =
ProductWeight<typename W1::ReverseWeight, typename W2::ReverseWeight>;
ProductWeight() {}
explicit ProductWeight(const PairWeight<W1, W2> &weight)
: PairWeight<W1, W2>(weight) {}
ProductWeight(W1 w1, W2 w2)
: PairWeight<W1, W2>(std::move(w1), std::move(w2)) {}
static const ProductWeight &Zero() {
static const ProductWeight zero(PairWeight<W1, W2>::Zero());
return zero;
}
static const ProductWeight &One() {
static const ProductWeight one(PairWeight<W1, W2>::One());
return one;
}
static const ProductWeight &NoWeight() {
static const ProductWeight no_weight(PairWeight<W1, W2>::NoWeight());
return no_weight;
}
static const string &Type() {
static const string *const type =
new string(W1::Type() + "_X_" + W2::Type());
return *type;
}
static constexpr uint64 Properties() {
return W1::Properties() & W2::Properties() &
(kLeftSemiring | kRightSemiring | kCommutative | kIdempotent);
}
ProductWeight Quantize(float delta = kDelta) const {
return ProductWeight(PairWeight<W1, W2>::Quantize(delta));
}
ReverseWeight Reverse() const {
return ReverseWeight(PairWeight<W1, W2>::Reverse());
}
};
template <class W1, class W2>
inline ProductWeight<W1, W2> Plus(const ProductWeight<W1, W2> &w1,
const ProductWeight<W1, W2> &w2) {
return ProductWeight<W1, W2>(Plus(w1.Value1(), w2.Value1()),
Plus(w1.Value2(), w2.Value2()));
}
template <class W1, class W2>
inline ProductWeight<W1, W2> Times(const ProductWeight<W1, W2> &w1,
const ProductWeight<W1, W2> &w2) {
return ProductWeight<W1, W2>(Times(w1.Value1(), w2.Value1()),
Times(w1.Value2(), w2.Value2()));
}
template <class W1, class W2>
inline ProductWeight<W1, W2> Divide(const ProductWeight<W1, W2> &w1,
const ProductWeight<W1, W2> &w2,
DivideType typ = DIVIDE_ANY) {
return ProductWeight<W1, W2>(Divide(w1.Value1(), w2.Value1(), typ),
Divide(w1.Value2(), w2.Value2(), typ));
}
// This function object generates weights by calling the underlying generators
// for the template weight types, like all other pair weight types. This is
// intended primarily for testing.
template <class W1, class W2>
class WeightGenerate<ProductWeight<W1, W2>> :
public WeightGenerate<PairWeight<W1, W2>> {
public:
using Weight = ProductWeight<W1, W2>;
using Generate = WeightGenerate<PairWeight<W1, W2>>;
explicit WeightGenerate(bool allow_zero = true) : Generate(allow_zero) {}
Weight operator()() const { return Weight(Generate::operator()()); }
};
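// Hedged usage sketch: pairing two semirings and combining values
// componentwise (results shown for illustration).
//
//   using PW = ProductWeight<TropicalWeight, LogWeight>;
//   PW a(1.0, 2.0), b(0.5, 4.0);
//   PW sum = Plus(a, b);   // (min(1.0, 0.5), -log(exp(-2) + exp(-4)))
//   PW prod = Times(a, b); // (1.0 + 0.5, 2.0 + 4.0) = (1.5, 6.0)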
} // namespace fst
#endif // FST_PRODUCT_WEIGHT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/replace-util.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Utility classes for the recursive replacement of FSTs (RTNs).
#ifndef FST_REPLACE_UTIL_H_
#define FST_REPLACE_UTIL_H_
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <fst/log.h>
#include <fst/connect.h>
#include <fst/mutable-fst.h>
#include <fst/topsort.h>
#include <fst/vector-fst.h>
namespace fst {
// This specifies what labels to output on the call or return arc. Note that
// REPLACE_LABEL_INPUT and REPLACE_LABEL_OUTPUT will produce transducers when
// applied to acceptors.
enum ReplaceLabelType {
// Epsilon labels on both input and output.
REPLACE_LABEL_NEITHER = 1,
// Non-epsilon labels on input and epsilon on output.
REPLACE_LABEL_INPUT = 2,
// Epsilon on input and non-epsilon on output.
REPLACE_LABEL_OUTPUT = 3,
// Non-epsilon labels on both input and output.
REPLACE_LABEL_BOTH = 4
};
// By default, ReplaceUtil copies the input label of the replace arc.
// The call_label_type and return_label_type options specify how to manage
// the labels of the call arc and the return arc of the replace FST.
struct ReplaceUtilOptions {
int64_t root; // Root rule for expansion.
ReplaceLabelType call_label_type; // How to label call arc.
ReplaceLabelType return_label_type; // How to label return arc.
int64_t return_label; // Label to put on return arc.
explicit ReplaceUtilOptions(
int64_t root = kNoLabel,
ReplaceLabelType call_label_type = REPLACE_LABEL_INPUT,
ReplaceLabelType return_label_type = REPLACE_LABEL_NEITHER,
int64_t return_label = 0)
: root(root),
call_label_type(call_label_type),
return_label_type(return_label_type),
return_label(return_label) {}
// For backwards compatibility.
ReplaceUtilOptions(int64_t root, bool epsilon_replace_arc)
: ReplaceUtilOptions(root,
epsilon_replace_arc ? REPLACE_LABEL_NEITHER
: REPLACE_LABEL_INPUT) {}
};
// Every non-terminal on a path appears as the first label on that path in every
// FST associated with a given SCC of the replace dependency graph. This would
// be true if the SCC were formed from left-linear grammar rules.
constexpr uint8_t kReplaceSCCLeftLinear = 0x01;
// Every non-terminal on a path appears as the final label on that path in every
// FST associated with a given SCC of the replace dependency graph. This would
// be true if the SCC were formed from right-linear grammar rules.
constexpr uint8_t kReplaceSCCRightLinear = 0x02;
// The SCC in the replace dependency graph has more than one state or a
// self-loop.
constexpr uint8_t kReplaceSCCNonTrivial = 0x04;
// Defined in replace.h.
template <class Arc>
void Replace(
const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &,
MutableFst<Arc> *, const ReplaceUtilOptions &);
// Utility class for the recursive replacement of FSTs (RTNs). The user provides
// a set of label/FST pairs at construction. These are used by methods for
// testing cyclic dependencies and connectedness and doing RTN connection and
// specific FST replacement by label or for various optimization properties. The
// modified results can be obtained with the GetFstPairs() or
// GetMutableFstPairs() methods.
template <class Arc>
class ReplaceUtil {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using FstPair = std::pair<Label, const Fst<Arc> *>;
using MutableFstPair = std::pair<Label, MutableFst<Arc> *>;
using NonTerminalHash = std::unordered_map<Label, Label>;
// Constructs from mutable FSTs; FST ownership is given to ReplaceUtil.
ReplaceUtil(const std::vector<MutableFstPair> &fst_pairs,
const ReplaceUtilOptions &opts);
// Constructs from FSTs; FST ownership is retained by caller.
ReplaceUtil(const std::vector<FstPair> &fst_pairs,
const ReplaceUtilOptions &opts);
// Constructs from ReplaceFst internals; FST ownership is retained by caller.
ReplaceUtil(const std::vector<std::unique_ptr<const Fst<Arc>>> &fst_array,
const NonTerminalHash &nonterminal_hash,
const ReplaceUtilOptions &opts);
~ReplaceUtil() {
for (Label i = 0; i < fst_array_.size(); ++i) delete fst_array_[i];
}
// True if the non-terminal dependencies are cyclic. Cyclic dependencies will
// result in an unexpandable FST.
bool CyclicDependencies() const {
GetDependencies(false);
return depprops_ & kCyclic;
}
// Returns the strongly-connected component ID in the dependency graph of the
// replace FSTS.
StateId SCC(Label label) const {
GetDependencies(false);
const auto it = nonterminal_hash_.find(label);
if (it == nonterminal_hash_.end()) return kNoStateId;
return depscc_[it->second];
}
// Returns properties for the strongly-connected component in the dependency
// graph of the replace FSTs. If the SCC is kReplaceSCCLeftLinear or
// kReplaceSCCRightLinear, that SCC can be represented as finite-state despite
// any cyclic dependencies, but not by the usual replacement operation (see
// fst/extensions/pdt/replace.h).
uint8_t SCCProperties(StateId scc_id) {
GetSCCProperties();
return depsccprops_[scc_id];
}
// Returns true if no useless FSTs, states or transitions are present in the
// RTN.
bool Connected() const {
GetDependencies(false);
uint64_t props = kAccessible | kCoAccessible;
for (Label i = 0; i < fst_array_.size(); ++i) {
if (!fst_array_[i]) continue;
if (fst_array_[i]->Properties(props, true) != props || !depaccess_[i]) {
return false;
}
}
return true;
}
// Removes useless FSTs, states and transitions from the RTN.
void Connect();
// Replaces FSTs specified by labels, unless there are cyclic dependencies.
void ReplaceLabels(const std::vector<Label> &labels);
// Replaces FSTs that have at most nstates states, narcs arcs and nnonterm
// non-terminals (updating in reverse dependency order), unless there are
// cyclic dependencies.
void ReplaceBySize(size_t nstates, size_t narcs, size_t nnonterms);
// Replaces singleton FSTS, unless there are cyclic dependencies.
void ReplaceTrivial() { ReplaceBySize(2, 1, 1); }
// Replaces non-terminals that have at most ninstances instances (updating in
// dependency order), unless there are cyclic dependencies.
void ReplaceByInstances(size_t ninstances);
// Replaces non-terminals that have only one instance, unless there are cyclic
// dependencies.
void ReplaceUnique() { ReplaceByInstances(1); }
// Returns label/FST pairs, retaining FST ownership.
void GetFstPairs(std::vector<FstPair> *fst_pairs);
// Returns label/mutable FST pairs, giving FST ownership over to the caller.
void GetMutableFstPairs(std::vector<MutableFstPair> *mutable_fst_pairs);
private:
// FST statistics.
struct ReplaceStats {
StateId nstates; // Number of states.
StateId nfinal; // Number of final states.
size_t narcs; // Number of arcs.
Label nnonterms; // Number of non-terminals in FST.
size_t nref; // Number of non-terminal instances referring to this FST.
// Number of times that ith FST references this FST
std::map<Label, size_t> inref;
// Number of times that this FST references the ith FST
std::map<Label, size_t> outref;
ReplaceStats() : nstates(0), nfinal(0), narcs(0), nnonterms(0), nref(0) {}
};
// Checks that Mutable FSTs exists, creating them if necessary.
void CheckMutableFsts();
// Computes the dependency graph for the RTN, computing dependency statistics
// if stats is true.
void GetDependencies(bool stats) const;
void ClearDependencies() const {
depfst_.DeleteStates();
stats_.clear();
depprops_ = 0;
depsccprops_.clear();
have_stats_ = false;
}
// Gets topological order of dependencies, returning false with cyclic input.
bool GetTopOrder(const Fst<Arc> &fst, std::vector<Label> *toporder) const;
// Updates statistics to reflect the replacement of the jth FST.
void UpdateStats(Label j);
// Computes the properties for the strongly-connected component in the
// dependency graph of the replace FSTs.
void GetSCCProperties() const;
Label root_label_; // Root non-terminal.
Label root_fst_; // Root FST ID.
ReplaceLabelType call_label_type_; // See Replace().
ReplaceLabelType return_label_type_; // See Replace().
int64_t return_label_; // See Replace().
std::vector<const Fst<Arc> *> fst_array_; // FST per ID.
std::vector<MutableFst<Arc> *> mutable_fst_array_; // Mutable FST per ID.
std::vector<Label> nonterminal_array_; // FST ID to non-terminal.
NonTerminalHash nonterminal_hash_; // Non-terminal to FST ID.
mutable VectorFst<Arc> depfst_; // FST ID dependencies.
mutable std::vector<StateId> depscc_; // FST SCC ID.
mutable std::vector<bool> depaccess_; // FST ID accessibility.
mutable uint64_t depprops_; // Dependency FST props.
mutable bool have_stats_; // Have dependency statistics?
mutable std::vector<ReplaceStats> stats_; // Per-FST statistics.
mutable std::vector<uint8_t> depsccprops_; // SCC properties.
ReplaceUtil(const ReplaceUtil &) = delete;
ReplaceUtil &operator=(const ReplaceUtil &) = delete;
};
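// Hedged usage sketch: optimizing an RTN before expansion. The labels and
// FST pointers are placeholders for illustration.
//
//   std::vector<ReplaceUtil<StdArc>::MutableFstPair> pairs =
//       {{kRootLabel, root_fst}, {kNameLabel, name_fst}};
//   ReplaceUtil<StdArc> util(pairs, ReplaceUtilOptions(kRootLabel));
//   if (!util.CyclicDependencies()) {
//     util.ReplaceTrivial(); // inline singleton FSTs
//     util.Connect();        // drop useless FSTs, states and arcs
//   }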
template <class Arc>
ReplaceUtil<Arc>::ReplaceUtil(const std::vector<MutableFstPair> &fst_pairs,
const ReplaceUtilOptions &opts)
: root_label_(opts.root),
call_label_type_(opts.call_label_type),
return_label_type_(opts.return_label_type),
return_label_(opts.return_label),
depprops_(0),
have_stats_(false) {
fst_array_.push_back(nullptr);
mutable_fst_array_.push_back(nullptr);
nonterminal_array_.push_back(kNoLabel);
for (const auto &fst_pair : fst_pairs) {
const auto label = fst_pair.first;
auto *fst = fst_pair.second;
nonterminal_hash_[label] = fst_array_.size();
nonterminal_array_.push_back(label);
fst_array_.push_back(fst);
mutable_fst_array_.push_back(fst);
}
root_fst_ = nonterminal_hash_[root_label_];
if (!root_fst_) {
FSTERROR() << "ReplaceUtil: No root FST for label: " << root_label_;
}
}
template <class Arc>
ReplaceUtil<Arc>::ReplaceUtil(const std::vector<FstPair> &fst_pairs,
const ReplaceUtilOptions &opts)
: root_label_(opts.root),
call_label_type_(opts.call_label_type),
return_label_type_(opts.return_label_type),
return_label_(opts.return_label),
depprops_(0),
have_stats_(false) {
fst_array_.push_back(nullptr);
nonterminal_array_.push_back(kNoLabel);
for (const auto &fst_pair : fst_pairs) {
const auto label = fst_pair.first;
const auto *fst = fst_pair.second;
nonterminal_hash_[label] = fst_array_.size();
nonterminal_array_.push_back(label);
fst_array_.push_back(fst->Copy());
}
root_fst_ = nonterminal_hash_[root_label_];
if (!root_fst_) {
FSTERROR() << "ReplaceUtil: No root FST for label: " << root_label_;
}
}
template <class Arc>
ReplaceUtil<Arc>::ReplaceUtil(
const std::vector<std::unique_ptr<const Fst<Arc>>> &fst_array,
const NonTerminalHash &nonterminal_hash, const ReplaceUtilOptions &opts)
: root_fst_(opts.root),
call_label_type_(opts.call_label_type),
return_label_type_(opts.return_label_type),
return_label_(opts.return_label),
nonterminal_array_(fst_array.size()),
nonterminal_hash_(nonterminal_hash),
depprops_(0),
have_stats_(false) {
fst_array_.push_back(nullptr);
for (size_t i = 1; i < fst_array.size(); ++i) {
fst_array_.push_back(fst_array[i]->Copy());
}
for (auto it = nonterminal_hash.begin(); it != nonterminal_hash.end(); ++it) {
nonterminal_array_[it->second] = it->first;
}
root_label_ = nonterminal_array_[root_fst_];
}
template <class Arc>
void ReplaceUtil<Arc>::GetDependencies(bool stats) const {
if (depfst_.NumStates() > 0) {
if (stats && !have_stats_) {
ClearDependencies();
} else {
return;
}
}
have_stats_ = stats;
if (have_stats_) stats_.reserve(fst_array_.size());
for (Label i = 0; i < fst_array_.size(); ++i) {
depfst_.AddState();
depfst_.SetFinal(i, Weight::One());
if (have_stats_) stats_.push_back(ReplaceStats());
}
depfst_.SetStart(root_fst_);
// Adds an arc from each state (representing an FST) to the state
// representing the FST being replaced (i.e., referenced).
for (Label i = 0; i < fst_array_.size(); ++i) {
const auto *ifst = fst_array_[i];
if (!ifst) continue;
for (StateIterator<Fst<Arc>> siter(*ifst); !siter.Done(); siter.Next()) {
const auto s = siter.Value();
if (have_stats_) {
++stats_[i].nstates;
if (ifst->Final(s) != Weight::Zero()) ++stats_[i].nfinal;
}
for (ArcIterator<Fst<Arc>> aiter(*ifst, s); !aiter.Done();
aiter.Next()) {
if (have_stats_) ++stats_[i].narcs;
const auto &arc = aiter.Value();
auto it = nonterminal_hash_.find(arc.olabel);
if (it != nonterminal_hash_.end()) {
const auto j = it->second;
depfst_.AddArc(i, Arc(arc.olabel, arc.olabel, Weight::One(), j));
if (have_stats_) {
++stats_[i].nnonterms;
++stats_[j].nref;
++stats_[j].inref[i];
++stats_[i].outref[j];
}
}
}
}
}
// Computes accessibility info.
SccVisitor<Arc> scc_visitor(&depscc_, &depaccess_, nullptr, &depprops_);
DfsVisit(depfst_, &scc_visitor);
}
template <class Arc>
void ReplaceUtil<Arc>::UpdateStats(Label j) {
if (!have_stats_) {
FSTERROR() << "ReplaceUtil::UpdateStats: Stats not available";
return;
}
if (j == root_fst_) return; // Can't replace root.
for (auto in = stats_[j].inref.begin(); in != stats_[j].inref.end(); ++in) {
const auto i = in->first;
const auto ni = in->second;
stats_[i].nstates += stats_[j].nstates * ni;
stats_[i].narcs += (stats_[j].narcs + 1) * ni;
stats_[i].nnonterms += (stats_[j].nnonterms - 1) * ni;
stats_[i].outref.erase(j);
for (auto out = stats_[j].outref.begin(); out != stats_[j].outref.end();
++out) {
const auto k = out->first;
const auto nk = out->second;
stats_[i].outref[k] += ni * nk;
}
}
for (auto out = stats_[j].outref.begin(); out != stats_[j].outref.end();
++out) {
const auto k = out->first;
const auto nk = out->second;
stats_[k].nref -= nk;
stats_[k].inref.erase(j);
for (auto in = stats_[j].inref.begin(); in != stats_[j].inref.end(); ++in) {
const auto i = in->first;
const auto ni = in->second;
stats_[k].inref[i] += ni * nk;
stats_[k].nref += ni * nk;
}
}
}
template <class Arc>
void ReplaceUtil<Arc>::CheckMutableFsts() {
if (mutable_fst_array_.empty()) {
for (Label i = 0; i < fst_array_.size(); ++i) {
if (!fst_array_[i]) {
mutable_fst_array_.push_back(nullptr);
} else {
mutable_fst_array_.push_back(new VectorFst<Arc>(*fst_array_[i]));
delete fst_array_[i];
fst_array_[i] = mutable_fst_array_[i];
}
}
}
}
template <class Arc>
void ReplaceUtil<Arc>::Connect() {
CheckMutableFsts();
static constexpr auto props = kAccessible | kCoAccessible;
for (auto *mutable_fst : mutable_fst_array_) {
if (!mutable_fst) continue;
if (mutable_fst->Properties(props, false) != props) {
fst::Connect(mutable_fst);
}
}
GetDependencies(false);
for (Label i = 0; i < mutable_fst_array_.size(); ++i) {
auto *fst = mutable_fst_array_[i];
if (fst && !depaccess_[i]) {
delete fst;
fst_array_[i] = nullptr;
mutable_fst_array_[i] = nullptr;
}
}
ClearDependencies();
}
template <class Arc>
bool ReplaceUtil<Arc>::GetTopOrder(const Fst<Arc> &fst,
std::vector<Label> *toporder) const {
// Finds topological order of dependencies.
std::vector<StateId> order;
bool acyclic = false;
TopOrderVisitor<Arc> top_order_visitor(&order, &acyclic);
DfsVisit(fst, &top_order_visitor);
if (!acyclic) {
LOG(WARNING) << "ReplaceUtil::GetTopOrder: Cyclical label dependencies";
return false;
}
toporder->resize(order.size());
for (Label i = 0; i < order.size(); ++i) (*toporder)[order[i]] = i;
return true;
}
template <class Arc>
void ReplaceUtil<Arc>::ReplaceLabels(const std::vector<Label> &labels) {
CheckMutableFsts();
std::unordered_set<Label> label_set;
for (const auto label : labels) {
// Can't replace root.
if (label != root_label_) label_set.insert(label);
}
// Finds FST dependencies restricted to the labels requested.
GetDependencies(false);
VectorFst<Arc> pfst(depfst_);
for (StateId i = 0; i < pfst.NumStates(); ++i) {
std::vector<Arc> arcs;
for (ArcIterator<VectorFst<Arc>> aiter(pfst, i); !aiter.Done();
aiter.Next()) {
const auto &arc = aiter.Value();
const auto label = nonterminal_array_[arc.nextstate];
if (label_set.count(label) > 0) arcs.push_back(arc);
}
pfst.DeleteArcs(i);
for (const auto &arc : arcs) pfst.AddArc(i, arc);
}
std::vector<Label> toporder;
if (!GetTopOrder(pfst, &toporder)) {
ClearDependencies();
return;
}
// Visits FSTs in reverse topological order of dependencies and performs
// replacements.
for (Label o = toporder.size() - 1; o >= 0; --o) {
std::vector<FstPair> fst_pairs;
auto s = toporder[o];
for (ArcIterator<VectorFst<Arc>> aiter(pfst, s); !aiter.Done();
aiter.Next()) {
const auto &arc = aiter.Value();
const auto label = nonterminal_array_[arc.nextstate];
const auto *fst = fst_array_[arc.nextstate];
fst_pairs.push_back(std::make_pair(label, fst));
}
if (fst_pairs.empty()) continue;
const auto label = nonterminal_array_[s];
const auto *fst = fst_array_[s];
fst_pairs.push_back(std::make_pair(label, fst));
const ReplaceUtilOptions opts(label, call_label_type_, return_label_type_,
return_label_);
Replace(fst_pairs, mutable_fst_array_[s], opts);
}
ClearDependencies();
}
template <class Arc>
void ReplaceUtil<Arc>::ReplaceBySize(size_t nstates, size_t narcs,
size_t nnonterms) {
std::vector<Label> labels;
GetDependencies(true);
std::vector<Label> toporder;
if (!GetTopOrder(depfst_, &toporder)) {
ClearDependencies();
return;
}
for (Label o = toporder.size() - 1; o >= 0; --o) {
const auto j = toporder[o];
if (stats_[j].nstates <= nstates && stats_[j].narcs <= narcs &&
stats_[j].nnonterms <= nnonterms) {
labels.push_back(nonterminal_array_[j]);
UpdateStats(j);
}
}
ReplaceLabels(labels);
}
template <class Arc>
void ReplaceUtil<Arc>::ReplaceByInstances(size_t ninstances) {
std::vector<Label> labels;
GetDependencies(true);
std::vector<Label> toporder;
if (!GetTopOrder(depfst_, &toporder)) {
ClearDependencies();
return;
}
for (Label o = 0; o < toporder.size(); ++o) {
const auto j = toporder[o];
if (stats_[j].nref <= ninstances) {
labels.push_back(nonterminal_array_[j]);
UpdateStats(j);
}
}
ReplaceLabels(labels);
}
template <class Arc>
void ReplaceUtil<Arc>::GetFstPairs(std::vector<FstPair> *fst_pairs) {
CheckMutableFsts();
fst_pairs->clear();
for (Label i = 0; i < fst_array_.size(); ++i) {
const auto label = nonterminal_array_[i];
const auto *fst = fst_array_[i];
if (!fst) continue;
fst_pairs->push_back(std::make_pair(label, fst));
}
}
template <class Arc>
void ReplaceUtil<Arc>::GetMutableFstPairs(
std::vector<MutableFstPair> *mutable_fst_pairs) {
CheckMutableFsts();
mutable_fst_pairs->clear();
for (Label i = 0; i < mutable_fst_array_.size(); ++i) {
const auto label = nonterminal_array_[i];
const auto *fst = mutable_fst_array_[i];
if (!fst) continue;
mutable_fst_pairs->push_back(std::make_pair(label, fst->Copy()));
}
}
template <class Arc>
void ReplaceUtil<Arc>::GetSCCProperties() const {
if (!depsccprops_.empty()) return;
GetDependencies(false);
if (depscc_.empty()) return;
for (StateId scc = 0; scc < depscc_.size(); ++scc) {
depsccprops_.push_back(kReplaceSCCLeftLinear | kReplaceSCCRightLinear);
}
if (!(depprops_ & kCyclic)) return; // No cyclic dependencies.
// Checks for self-loops in the dependency graph.
for (StateId scc = 0; scc < depscc_.size(); ++scc) {
for (ArcIterator<Fst<Arc> > aiter(depfst_, scc);
!aiter.Done(); aiter.Next()) {
const auto &arc = aiter.Value();
if (arc.nextstate == scc) { // SCC has a self loop.
depsccprops_[scc] |= kReplaceSCCNonTrivial;
}
}
}
std::vector<bool> depscc_visited(depscc_.size(), false);
for (Label i = 0; i < fst_array_.size(); ++i) {
const auto *fst = fst_array_[i];
if (!fst) continue;
const auto depscc = depscc_[i];
if (depscc_visited[depscc]) { // SCC has more than one state.
depsccprops_[depscc] |= kReplaceSCCNonTrivial;
}
depscc_visited[depscc] = true;
std::vector<StateId> fstscc; // SCCs of the current FST.
uint64_t fstprops;
SccVisitor<Arc> scc_visitor(&fstscc, nullptr, nullptr, &fstprops);
DfsVisit(*fst, &scc_visitor);
for (StateIterator<Fst<Arc>> siter(*fst); !siter.Done(); siter.Next()) {
const auto s = siter.Value();
for (ArcIterator<Fst<Arc>> aiter(*fst, s); !aiter.Done(); aiter.Next()) {
const auto &arc = aiter.Value();
auto it = nonterminal_hash_.find(arc.olabel);
if (it == nonterminal_hash_.end() || depscc_[it->second] != depscc) {
continue; // Skips if a terminal or a non-terminal not in SCC.
}
const bool arc_in_cycle = fstscc[s] == fstscc[arc.nextstate];
// Left linear iff all non-terminals are initial.
if (s != fst->Start() || arc_in_cycle) {
depsccprops_[depscc] &= ~kReplaceSCCLeftLinear;
}
// Right linear iff all non-terminals are final.
if (fst->Final(arc.nextstate) == Weight::Zero() || arc_in_cycle) {
depsccprops_[depscc] &= ~kReplaceSCCRightLinear;
}
}
}
}
}
} // namespace fst
#endif // FST_REPLACE_UTIL_H_
| 0 |
coqui_public_repos/TTS/TTS/tts/utils | coqui_public_repos/TTS/TTS/tts/utils/monotonic_align/core.pyx | import numpy as np
cimport cython
cimport numpy as np
from cython.parallel import prange
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_x, int t_y, float max_neg_val) nogil:
cdef int x
cdef int y
cdef float v_prev
cdef float v_cur
cdef float tmp
cdef int index = t_x - 1
for y in range(t_y):
for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
if x == y:
v_cur = max_neg_val
else:
v_cur = value[x, y-1]
if x == 0:
if y == 0:
v_prev = 0.
else:
v_prev = max_neg_val
else:
v_prev = value[x-1, y-1]
value[x, y] = max(v_cur, v_prev) + value[x, y]
for y in range(t_y - 1, -1, -1):
path[index, y] = 1
if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]):
index = index - 1
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil:
cdef int b = values.shape[0]
cdef int i
for i in prange(b, nogil=True):
maximum_path_each(paths[i], values[i], t_xs[i], t_ys[i], max_neg_val)
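# Hedged usage sketch (illustrative): the Python wrapper typically allocates
# the output path and passes C-contiguous int32/float32 arrays, e.g.:
#
# import numpy as np
# paths = np.zeros((b, t_x_max, t_y_max), dtype=np.int32)
# maximum_path_c(paths, values.astype(np.float32),
# t_xs.astype(np.int32), t_ys.astype(np.int32))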
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/lock.h | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Google-compatibility locking declarations and inline definitions.
#ifndef FST_LIB_LOCK_H_
#define FST_LIB_LOCK_H_
#include <mutex>
namespace fst {
using namespace std;
class Mutex {
public:
Mutex() {}
inline void Lock() { mu_.lock(); }
inline void Unlock() { mu_.unlock(); }
private:
std::mutex mu_;
Mutex(const Mutex &) = delete;
Mutex &operator=(const Mutex &) = delete;
};
class MutexLock {
public:
explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
~MutexLock() { mu_->Unlock(); }
private:
Mutex *mu_;
MutexLock(const MutexLock &) = delete;
MutexLock &operator=(const MutexLock &) = delete;
};
// Currently, we don't use a separate reader lock.
// TODO(kbg): Implement this with std::shared_mutex once C++17 becomes widely
// available.
using ReaderMutexLock = MutexLock;
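// Hedged usage sketch: scoped locking with MutexLock; the lock is released
// when the guard goes out of scope.
//
//   static Mutex mu;
//   void Increment(int *counter) {
//     MutexLock lock(&mu);
//     ++*counter;
//   }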
} // namespace fst
#endif // FST_LIB_LOCK_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/arcsort.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes to sort arcs in an FST.
#ifndef FST_ARCSORT_H_
#define FST_ARCSORT_H_
#include <algorithm>
#include <string>
#include <vector>
#include <fst/cache.h>
#include <fst/state-map.h>
#include <fst/test-properties.h>
namespace fst {
template <class Arc, class Compare>
class ArcSortMapper {
public:
using FromArc = Arc;
using ToArc = Arc;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
ArcSortMapper(const Fst<Arc> &fst, const Compare &comp)
: fst_(fst), comp_(comp), i_(0) {}
// Allows updating Fst argument; pass only if changed.
ArcSortMapper(const ArcSortMapper<Arc, Compare> &mapper,
const Fst<Arc> *fst = nullptr)
: fst_(fst ? *fst : mapper.fst_), comp_(mapper.comp_), i_(0) {}
StateId Start() { return fst_.Start(); }
Weight Final(StateId s) const { return fst_.Final(s); }
void SetState(StateId s) {
i_ = 0;
arcs_.clear();
arcs_.reserve(fst_.NumArcs(s));
for (ArcIterator<Fst<Arc>> aiter(fst_, s); !aiter.Done(); aiter.Next()) {
arcs_.push_back(aiter.Value());
}
std::sort(arcs_.begin(), arcs_.end(), comp_);
}
bool Done() const { return i_ >= arcs_.size(); }
const Arc &Value() const { return arcs_[i_]; }
void Next() { ++i_; }
MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; }
MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; }
uint64 Properties(uint64 props) const { return comp_.Properties(props); }
private:
const Fst<Arc> &fst_;
const Compare &comp_;
std::vector<Arc> arcs_;
ssize_t i_; // current arc position
ArcSortMapper &operator=(const ArcSortMapper &) = delete;
};
// Sorts the arcs in an FST according to function object 'comp' of type Compare.
// This version modifies its input. Comparison function objects ILabelCompare
// and OLabelCompare are provided by the library. In general, Compare must meet
// the requirements for a comparison function object (e.g., similar to those
// used by std::sort). It must also have a member Properties(uint64) that
// specifies the known properties of the sorted FST; it takes as argument the
// input FST's known properties before the sort.
//
// Complexity:
//
// - Time: O(v d log d)
// - Space: O(d)
//
// where v = # of states and d = maximum out-degree.
template <class Arc, class Compare>
void ArcSort(MutableFst<Arc> *fst, Compare comp) {
ArcSortMapper<Arc, Compare> mapper(*fst, comp);
StateMap(fst, mapper);
}
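// Example (editor's sketch, not part of the original header): destructively
// sorting the arcs of a StdVectorFst by input label, e.g. before using it as
// the right-hand argument of composition (ILabelCompare is defined below):
//
//   StdVectorFst fst;
//   // ... build fst ...
//   ArcSort(&fst, ILabelCompare<StdArc>());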
using ArcSortFstOptions = CacheOptions;
// Sorts the arcs in an FST according to function object 'comp' of type Compare.
// This version is a delayed FST. Comparison function objects ILabelCompare and
// OLabelCompare are provided by the library. In general, Compare must meet the
// requirements for a comparison function object (e.g., similar to those
// used by std::sort). It must also have a member Properties(uint64) that
// specifies the known properties of the sorted FST; it takes as argument the
// input FST's known properties.
//
// Complexity:
//
// - Time: O(v d log d)
// - Space: O(d)
//
// where v = # of states visited, d = maximum out-degree of states visited.
// Constant time and space to visit an input state is assumed and exclusive of
// caching.
template <class Arc, class Compare>
class ArcSortFst : public StateMapFst<Arc, Arc, ArcSortMapper<Arc, Compare>> {
using StateMapFst<Arc, Arc, ArcSortMapper<Arc, Compare>>::GetImpl;
public:
using StateId = typename Arc::StateId;
using Mapper = ArcSortMapper<Arc, Compare>;
ArcSortFst(const Fst<Arc> &fst, const Compare &comp)
: StateMapFst<Arc, Arc, Mapper>(fst,
ArcSortMapper<Arc, Compare>(fst, comp)) {}
ArcSortFst(const Fst<Arc> &fst, const Compare &comp,
const ArcSortFstOptions &opts)
: StateMapFst<Arc, Arc, Mapper>(fst, Mapper(fst, comp), opts) {}
// See Fst<>::Copy() for doc.
ArcSortFst(const ArcSortFst<Arc, Compare> &fst, bool safe = false)
: StateMapFst<Arc, Arc, Mapper>(fst, safe) {}
// Gets a copy of this ArcSortFst. See Fst<>::Copy() for further doc.
ArcSortFst<Arc, Compare> *Copy(bool safe = false) const override {
return new ArcSortFst(*this, safe);
}
size_t NumArcs(StateId s) const override {
return GetImpl()->GetFst()->NumArcs(s);
}
size_t NumInputEpsilons(StateId s) const override {
return GetImpl()->GetFst()->NumInputEpsilons(s);
}
size_t NumOutputEpsilons(StateId s) const override {
return GetImpl()->GetFst()->NumOutputEpsilons(s);
}
};
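// Example (editor's sketch, not part of the original header): a delayed,
// on-demand sort by output label that leaves the input FST untouched; states
// are sorted lazily as they are visited and cached per ArcSortFstOptions:
//
//   const ArcSortFst<StdArc, OLabelCompare<StdArc>> sorted(
//       fst, OLabelCompare<StdArc>());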
// Specialization for ArcSortFst.
template <class Arc, class Compare>
class StateIterator<ArcSortFst<Arc, Compare>>
: public StateIterator<StateMapFst<Arc, Arc, ArcSortMapper<Arc, Compare>>> {
public:
explicit StateIterator(const ArcSortFst<Arc, Compare> &fst)
: StateIterator<StateMapFst<Arc, Arc, ArcSortMapper<Arc, Compare>>>(fst) {
}
};
// Specialization for ArcSortFst.
template <class Arc, class Compare>
class ArcIterator<ArcSortFst<Arc, Compare>>
: public ArcIterator<StateMapFst<Arc, Arc, ArcSortMapper<Arc, Compare>>> {
public:
ArcIterator(const ArcSortFst<Arc, Compare> &fst, typename Arc::StateId s)
: ArcIterator<StateMapFst<Arc, Arc, ArcSortMapper<Arc, Compare>>>(fst,
s) {}
};
// Compare class for comparing input labels of arcs.
template <class Arc>
class ILabelCompare {
public:
ILabelCompare() {}
bool operator()(const Arc &arc1, const Arc &arc2) const {
return arc1.ilabel < arc2.ilabel;
}
uint64 Properties(uint64 props) const {
return (props & kArcSortProperties) | kILabelSorted |
(props & kAcceptor ? kOLabelSorted : 0);
}
};
// Compare class for comparing output labels of arcs.
template <class Arc>
class OLabelCompare {
public:
OLabelCompare() {}
bool operator()(const Arc &arc1, const Arc &arc2) const {
return arc1.olabel < arc2.olabel;
}
uint64 Properties(uint64 props) const {
return (props & kArcSortProperties) | kOLabelSorted |
(props & kAcceptor ? kILabelSorted : 0);
}
};
// Useful aliases when using StdArc.
template <class Compare>
using StdArcSortFst = ArcSortFst<StdArc, Compare>;
using StdILabelCompare = ILabelCompare<StdArc>;
using StdOLabelCompare = OLabelCompare<StdArc>;
} // namespace fst
#endif // FST_ARCSORT_H_
| 0 |
coqui_public_repos/STT/native_client | coqui_public_repos/STT/native_client/ctcdecode/COPYING | Decoder sources originally imported from https://github.com/parlance/ctcdecode, commit 140b45860cec6671fb0bf6dbb675073241c0f9b0
Decoder sources are under the MIT license (LICENSE.parlance).
Binding code adapted from https://github.com/PaddlePaddle/DeepSpeech/tree/develop/decoders/swig, commit 3ea19973c66a6a10320888ba47a8857bebf5abfa
Binding code is under the Apache License (LICENSE.paddlepaddle).
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/lookahead/arc_lookahead-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/fst.h>
#include <fst/matcher-fst.h>
namespace fst {
static FstRegisterer<StdArcLookAheadFst> ArcLookAheadFst_StdArc_registerer;
static FstRegisterer<MatcherFst<
ConstFst<LogArc>, ArcLookAheadMatcher<SortedMatcher<ConstFst<LogArc>>>,
arc_lookahead_fst_type>>
ArcLookAheadFst_LogArc_registerer;
static FstRegisterer<MatcherFst<
ConstFst<Log64Arc>, ArcLookAheadMatcher<SortedMatcher<ConstFst<Log64Arc>>>,
arc_lookahead_fst_type>>
ArcLookAheadFst_Log64Arc_registerer;
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/minimize.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes to minimize an FST.
#ifndef FST_MINIMIZE_H_
#define FST_MINIMIZE_H_
#include <cmath>
#include <algorithm>
#include <map>
#include <queue>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/arcsort.h>
#include <fst/connect.h>
#include <fst/dfs-visit.h>
#include <fst/encode.h>
#include <fst/factor-weight.h>
#include <fst/fst.h>
#include <fst/mutable-fst.h>
#include <fst/partition.h>
#include <fst/push.h>
#include <fst/queue.h>
#include <fst/reverse.h>
#include <fst/shortest-distance.h>
#include <fst/state-map.h>
namespace fst {
namespace internal {
// Comparator for creating partition.
template <class Arc>
class StateComparator {
public:
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
StateComparator(const Fst<Arc> &fst, const Partition<StateId> &partition)
: fst_(fst), partition_(partition) {}
// Compares state x with state y based on sort criteria.
bool operator()(const StateId x, const StateId y) const {
// Checks for final state equivalence.
const auto xfinal = fst_.Final(x).Hash();
const auto yfinal = fst_.Final(y).Hash();
if (xfinal < yfinal) {
return true;
} else if (xfinal > yfinal) {
return false;
}
// Checks for number of arcs.
if (fst_.NumArcs(x) < fst_.NumArcs(y)) return true;
if (fst_.NumArcs(x) > fst_.NumArcs(y)) return false;
// If the number of arcs is equal, checks for arc match.
for (ArcIterator<Fst<Arc>> aiter1(fst_, x), aiter2(fst_, y);
!aiter1.Done() && !aiter2.Done(); aiter1.Next(), aiter2.Next()) {
const auto &arc1 = aiter1.Value();
const auto &arc2 = aiter2.Value();
if (arc1.ilabel < arc2.ilabel) return true;
if (arc1.ilabel > arc2.ilabel) return false;
if (partition_.ClassId(arc1.nextstate) <
partition_.ClassId(arc2.nextstate))
return true;
if (partition_.ClassId(arc1.nextstate) >
partition_.ClassId(arc2.nextstate))
return false;
}
return false;
}
private:
const Fst<Arc> &fst_;
const Partition<StateId> &partition_;
};
// Computes equivalence classes for cyclic unweighted acceptors. For cyclic
// minimization we use the classic Hopcroft minimization algorithm, which has
// complexity O(E log V) where E is the number of arcs and V is the number of
// states.
//
// For more information, see:
//
// Hopcroft, J. 1971. An n Log n algorithm for minimizing states in a finite
// automaton. Ms, Stanford University.
//
// Note: the original presentation of the paper was for a finite automaton (==
// deterministic, unweighted acceptor), but we also apply it to the
// nondeterministic case, where it is also applicable as long as the semiring is
// idempotent (if the semiring is not idempotent, there are some complexities
// in keeping track of the weight when there are multiple arcs to states that
// will be merged, and we don't deal with this).
template <class Arc, class Queue>
class CyclicMinimizer {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using ClassId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using RevArc = ReverseArc<Arc>;
explicit CyclicMinimizer(const ExpandedFst<Arc> &fst) {
Initialize(fst);
Compute(fst);
}
const Partition<StateId> &GetPartition() const { return P_; }
private:
// StateILabelHasher is a hashing object that computes a hash-function
// of an FST state that depends only on the set of ilabels on arcs leaving
// the state [note: it assumes that the arcs are ilabel-sorted].
// In order to work correctly for non-deterministic automata, multiple
// instances of the same ilabel count the same as a single instance.
class StateILabelHasher {
public:
explicit StateILabelHasher(const Fst<Arc> &fst) : fst_(fst) {}
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
size_t operator()(const StateId s) {
const size_t p1 = 7603;
const size_t p2 = 433024223;
size_t result = p2;
size_t current_ilabel = kNoLabel;
for (ArcIterator<Fst<Arc>> aiter(fst_, s); !aiter.Done(); aiter.Next()) {
Label this_ilabel = aiter.Value().ilabel;
if (this_ilabel != current_ilabel) { // Ignores repeats.
result = p1 * result + this_ilabel;
current_ilabel = this_ilabel;
}
}
return result;
}
private:
const Fst<Arc> &fst_;
};
class ArcIterCompare {
public:
explicit ArcIterCompare(const Partition<StateId> &partition)
: partition_(partition) {}
ArcIterCompare(const ArcIterCompare &comp) : partition_(comp.partition_) {}
// Compares two iterators based on their input labels.
bool operator()(const ArcIterator<Fst<RevArc>> *x,
const ArcIterator<Fst<RevArc>> *y) const {
const auto &xarc = x->Value();
const auto &yarc = y->Value();
return xarc.ilabel > yarc.ilabel;
}
private:
const Partition<StateId> &partition_;
};
using ArcIterQueue =
std::priority_queue<ArcIterator<Fst<RevArc>> *,
std::vector<ArcIterator<Fst<RevArc>> *>,
ArcIterCompare>;
private:
// Prepartitions the space into equivalence classes. We ensure that final and
// non-final states always go into different equivalence classes, and we use
// class StateILabelHasher to make sure that, most of the time, states with
// different sets of ilabels on their outgoing arcs go to different classes.
// Note: for the O(n) guarantees we don't rely on the goodness of this
// hashing function---it just provides a bonus speedup.
void PrePartition(const ExpandedFst<Arc> &fst) {
VLOG(5) << "PrePartition";
StateId next_class = 0;
auto num_states = fst.NumStates();
// Allocates a temporary vector to store the initial class mappings, so that
// we can allocate the classes all at once.
std::vector<StateId> state_to_initial_class(num_states);
{
// We maintain two maps from hash-value to class---one for final states
// (final-prob == One()) and one for non-final states
// (final-prob == Zero()). We are processing unweighted acceptors, so these
// are the only two possible values.
using HashToClassMap = std::unordered_map<size_t, StateId>;
HashToClassMap hash_to_class_nonfinal;
HashToClassMap hash_to_class_final;
StateILabelHasher hasher(fst);
for (StateId s = 0; s < num_states; ++s) {
size_t hash = hasher(s);
HashToClassMap &this_map =
(fst.Final(s) != Weight::Zero() ? hash_to_class_final
: hash_to_class_nonfinal);
// Avoids two map lookups by using 'insert' instead of 'find'.
auto p = this_map.insert(std::make_pair(hash, next_class));
state_to_initial_class[s] = p.second ? next_class++ : p.first->second;
}
// Lets the unordered_maps go out of scope before we allocate the classes,
// to reduce the maximum amount of memory used.
}
P_.AllocateClasses(next_class);
for (StateId s = 0; s < num_states; ++s) {
P_.Add(s, state_to_initial_class[s]);
}
for (StateId c = 0; c < next_class; ++c) L_.Enqueue(c);
VLOG(5) << "Initial Partition: " << P_.NumClasses();
}
// Creates inverse transition Tr_ = rev(fst), loops over states in FST and
// splits on final, creating two blocks in the partition corresponding to
// final, non-final.
void Initialize(const ExpandedFst<Arc> &fst) {
// Constructs Tr.
Reverse(fst, &Tr_);
ILabelCompare<RevArc> ilabel_comp;
ArcSort(&Tr_, ilabel_comp);
// Tells the partition how many elements to allocate. The first state in
// Tr_ is the super-final state.
P_.Initialize(Tr_.NumStates() - 1);
// Prepares initial partition.
PrePartition(fst);
// Allocates arc iterator queue.
ArcIterCompare comp(P_);
aiter_queue_.reset(new ArcIterQueue(comp));
}
// Partitions all classes with destination C.
void Split(ClassId C) {
// Prepares priority queue: opens arc iterator for each state in C, and
// inserts into priority queue.
for (PartitionIterator<StateId> siter(P_, C); !siter.Done(); siter.Next()) {
StateId s = siter.Value();
if (Tr_.NumArcs(s + 1)) {
aiter_queue_->push(new ArcIterator<Fst<RevArc>>(Tr_, s + 1));
}
}
// Now pops arc iterator from queue, splits entering equivalence class, and
// re-inserts updated iterator into queue.
Label prev_label = -1;
while (!aiter_queue_->empty()) {
std::unique_ptr<ArcIterator<Fst<RevArc>>> aiter(aiter_queue_->top());
aiter_queue_->pop();
if (aiter->Done()) continue;
const auto &arc = aiter->Value();
auto from_state = aiter->Value().nextstate - 1;
auto from_label = arc.ilabel;
if (prev_label != from_label) P_.FinalizeSplit(&L_);
auto from_class = P_.ClassId(from_state);
if (P_.ClassSize(from_class) > 1) P_.SplitOn(from_state);
prev_label = from_label;
aiter->Next();
if (!aiter->Done()) aiter_queue_->push(aiter.release());
}
P_.FinalizeSplit(&L_);
}
// Main loop for Hopcroft minimization.
void Compute(const Fst<Arc> &fst) {
// Processes active classes (FIFO, or FILO).
while (!L_.Empty()) {
const auto C = L_.Head();
L_.Dequeue();
Split(C); // Splits on C, all labels in C.
}
}
private:
// Partitioning of states into equivalence classes.
Partition<StateId> P_;
// Set of active classes to be processed in partition P.
Queue L_;
// Reversed transition function.
VectorFst<RevArc> Tr_;
// Priority queue of open arc iterators for all states in the splitter
// equivalence class.
std::unique_ptr<ArcIterQueue> aiter_queue_;
};
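// Editor's note (illustration, not from the original header): schematically,
// each call to Split(C) above performs one Hopcroft refinement step. For the
// splitter class C, taking the states with arcs into C label by label:
//
//   for each label a (in sorted order, via the priority queue):
//     for each state q with an a-arc into C (found through Tr_):
//       SplitOn(q);        // marks q's class as partially moved
//     FinalizeSplit(&L_);  // divides partially-moved classes; newly created
//                          // classes are enqueued on L_ for further splitting
//
// In Hopcroft's classical presentation, only the smaller half of each split
// class needs to be re-processed, which is what yields the O(E log V) bound.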
// Computes equivalence classes for acyclic FST.
//
// Complexity:
//
// O(E)
//
// where E is the number of arcs.
//
// For more information, see:
//
// Revuz, D. 1992. Minimization of acyclic deterministic automata in linear
// time. Theoretical Computer Science 92(1): 181-189.
template <class Arc>
class AcyclicMinimizer {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using ClassId = typename Arc::StateId;
using Weight = typename Arc::Weight;
explicit AcyclicMinimizer(const ExpandedFst<Arc> &fst) {
Initialize(fst);
Refine(fst);
}
const Partition<StateId> &GetPartition() { return partition_; }
private:
// DFS visitor to compute the height (distance) to final state.
class HeightVisitor {
public:
HeightVisitor() : max_height_(0), num_states_(0) {}
// Invoked before DFS visit.
void InitVisit(const Fst<Arc> &fst) {}
// Invoked when state is discovered (2nd arg is DFS tree root).
bool InitState(StateId s, StateId root) {
// Extends the height array, initializing new entries to -1 (i.e., unset).
for (StateId i = height_.size(); i <= s; ++i) height_.push_back(-1);
if (s >= num_states_) num_states_ = s + 1;
return true;
}
// Invoked when tree arc examined (to undiscovered state).
bool TreeArc(StateId s, const Arc &arc) { return true; }
// Invoked when back arc examined (to unfinished state).
bool BackArc(StateId s, const Arc &arc) { return true; }
// Invoked when forward or cross arc examined (to finished state).
bool ForwardOrCrossArc(StateId s, const Arc &arc) {
if (height_[arc.nextstate] + 1 > height_[s]) {
height_[s] = height_[arc.nextstate] + 1;
}
return true;
}
// Invoked when state finished (parent is kNoStateId for tree root).
void FinishState(StateId s, StateId parent, const Arc *parent_arc) {
if (height_[s] == -1) height_[s] = 0;
const auto h = height_[s] + 1;
if (parent >= 0) {
if (h > height_[parent]) height_[parent] = h;
if (h > max_height_) max_height_ = h;
}
}
// Invoked after DFS visit.
void FinishVisit() {}
size_t max_height() const { return max_height_; }
const std::vector<StateId> &height() const { return height_; }
size_t num_states() const { return num_states_; }
private:
std::vector<StateId> height_;
size_t max_height_;
size_t num_states_;
};
private:
// Clusters states according to height (distance to the final state).
void Initialize(const Fst<Arc> &fst) {
// Computes height (distance to final state).
HeightVisitor hvisitor;
DfsVisit(fst, &hvisitor);
// Creates initial partition based on height.
partition_.Initialize(hvisitor.num_states());
partition_.AllocateClasses(hvisitor.max_height() + 1);
const auto &hstates = hvisitor.height();
for (StateId s = 0; s < hstates.size(); ++s) partition_.Add(s, hstates[s]);
}
// Refines states based on arc sort (out degree, arc equivalence).
void Refine(const Fst<Arc> &fst) {
using EquivalenceMap = std::map<StateId, StateId, StateComparator<Arc>>;
StateComparator<Arc> comp(fst, partition_);
// Starts with tail (height = 0).
auto height = partition_.NumClasses();
for (StateId h = 0; h < height; ++h) {
EquivalenceMap equiv_classes(comp);
// Sorts states within equivalence class.
PartitionIterator<StateId> siter(partition_, h);
equiv_classes[siter.Value()] = h;
for (siter.Next(); !siter.Done(); siter.Next()) {
auto insert_result =
equiv_classes.insert(std::make_pair(siter.Value(), kNoStateId));
if (insert_result.second) {
insert_result.first->second = partition_.AddClass();
}
}
// Creates refined partition.
for (siter.Reset(); !siter.Done();) {
const auto s = siter.Value();
const auto old_class = partition_.ClassId(s);
const auto new_class = equiv_classes[s];
// A move operation can invalidate the iterator, so we first update
// the iterator to the next element before we move the current element
// out of the list.
siter.Next();
if (old_class != new_class) partition_.Move(s, new_class);
}
}
}
private:
Partition<StateId> partition_;
};
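// Editor's illustration (not from the original header): for the acyclic
// acceptor below, Initialize() first clusters states by height (the longest
// distance to a final state), and Refine() then splits each height class by
// comparing arcs:
//
//   0 --a--> 1 --b--> 2(final)    heights: h(2) = 0, h(1) = h(3) = 1, h(0) = 2
//   0 --c--> 3 --b--> 2(final)
//
// States 1 and 3 start in the same class (height 1) and stay merged, since
// each has a single b-arc into state 2's class.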
// Given a partition and a Mutable FST, merges states of Fst in place (i.e.,
// destructively). Merging works by taking the first state in a class of the
// partition to be the representative state for the class. Each arc is then
// reconnected to this state. All states in the class are merged by adding
// their arcs to the representative state.
template <class Arc>
void MergeStates(const Partition<typename Arc::StateId> &partition,
MutableFst<Arc> *fst) {
using StateId = typename Arc::StateId;
std::vector<StateId> state_map(partition.NumClasses());
for (StateId i = 0; i < partition.NumClasses(); ++i) {
PartitionIterator<StateId> siter(partition, i);
state_map[i] = siter.Value(); // First state in partition.
}
// Relabels destination states.
for (StateId c = 0; c < partition.NumClasses(); ++c) {
for (PartitionIterator<StateId> siter(partition, c); !siter.Done();
siter.Next()) {
const auto s = siter.Value();
for (MutableArcIterator<MutableFst<Arc>> aiter(fst, s); !aiter.Done();
aiter.Next()) {
auto arc = aiter.Value();
arc.nextstate = state_map[partition.ClassId(arc.nextstate)];
if (s == state_map[c]) { // For the first state, just sets destination.
aiter.SetValue(arc);
} else {
fst->AddArc(state_map[c], arc);
}
}
}
}
fst->SetStart(state_map[partition.ClassId(fst->Start())]);
Connect(fst);
}
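// Editor's note (illustration, not from the original header): if the
// partition groups states as {0} and {1, 2}, state 1 (the first state in its
// class) becomes the representative; arcs into 1 or 2 are redirected to 1,
// state 2's arcs are copied onto 1, and the final Connect() removes the
// now-unreachable state 2.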
template <class Arc>
void AcceptorMinimize(MutableFst<Arc> *fst,
bool allow_acyclic_minimization = true) {
if (!(fst->Properties(kAcceptor | kUnweighted, true) ==
(kAcceptor | kUnweighted))) {
FSTERROR() << "FST is not an unweighted acceptor";
fst->SetProperties(kError, kError);
return;
}
// Connects FST before minimization, handles disconnected states.
Connect(fst);
if (fst->NumStates() == 0) return;
if (allow_acyclic_minimization && fst->Properties(kAcyclic, true)) {
// Acyclic minimization (Revuz).
VLOG(2) << "Acyclic minimization";
ArcSort(fst, ILabelCompare<Arc>());
AcyclicMinimizer<Arc> minimizer(*fst);
MergeStates(minimizer.GetPartition(), fst);
} else {
// Either the FST has cycles, or it's generated from non-deterministic input
// (which the Revuz algorithm can't handle), so use the cyclic minimization
// algorithm of Hopcroft.
VLOG(2) << "Cyclic minimization";
CyclicMinimizer<Arc, LifoQueue<typename Arc::StateId>> minimizer(*fst);
MergeStates(minimizer.GetPartition(), fst);
}
// Merges duplicate arcs in the appropriate semiring.
ArcUniqueMapper<Arc> mapper(*fst);
StateMap(fst, mapper);
}
} // namespace internal
// In-place minimization of deterministic weighted automata and transducers,
// and also of non-deterministic ones if they use an idempotent semiring.
// For transducers, if the 'sfst' argument is not null, the algorithm
// produces a compact factorization of the minimal transducer.
//
// In the acyclic deterministic case, we use an algorithm from Revuz that is
// linear in the number of arcs (edges) in the machine.
//
// In the cyclic or non-deterministic case, we use the classical Hopcroft
// minimization (which was presented for the deterministic case but which
// also works for non-deterministic FSTs); this has complexity O(e log v).
//
template <class Arc>
void Minimize(MutableFst<Arc> *fst, MutableFst<Arc> *sfst = nullptr,
float delta = kShortestDelta, bool allow_nondet = false) {
using Weight = typename Arc::Weight;
const auto props = fst->Properties(
kAcceptor | kIDeterministic | kWeighted | kUnweighted, true);
bool allow_acyclic_minimization;
if (props & kIDeterministic) {
allow_acyclic_minimization = true;
} else {
// Our approach to minimization of non-deterministic FSTs will only work in
// idempotent semirings---for non-deterministic inputs, a state could have
// multiple transitions to states that will get merged, and we'd have to
// sum their weights. The algorithm doesn't handle that.
if (!(Weight::Properties() & kIdempotent)) {
fst->SetProperties(kError, kError);
FSTERROR() << "Cannot minimize a non-deterministic FST over a "
"non-idempotent semiring";
return;
} else if (!allow_nondet) {
fst->SetProperties(kError, kError);
FSTERROR() << "Refusing to minimize a non-deterministic FST with "
<< "allow_nondet = false";
return;
}
// The Revuz algorithm won't work for nondeterministic inputs, so if the
// input is nondeterministic, we'll have to pass a bool saying not to use
// that algorithm. We check at this level rather than in AcceptorMinimize(),
// because it's possible that the FST at this level could be deterministic,
// but a harmless type of non-determinism could be introduced by Encode()
// (thanks to kEncodeWeights, if the FST has epsilons and a final weight
// equal to the weight of some epsilon arc).
allow_acyclic_minimization = false;
}
if (!(props & kAcceptor)) { // Weighted transducer.
VectorFst<GallicArc<Arc, GALLIC_LEFT>> gfst;
ArcMap(*fst, &gfst, ToGallicMapper<Arc, GALLIC_LEFT>());
fst->DeleteStates();
gfst.SetProperties(kAcceptor, kAcceptor);
Push(&gfst, REWEIGHT_TO_INITIAL, delta);
ArcMap(&gfst, QuantizeMapper<GallicArc<Arc, GALLIC_LEFT>>(delta));
EncodeMapper<GallicArc<Arc, GALLIC_LEFT>> encoder(
kEncodeLabels | kEncodeWeights, ENCODE);
Encode(&gfst, &encoder);
internal::AcceptorMinimize(&gfst, allow_acyclic_minimization);
Decode(&gfst, encoder);
if (!sfst) {
FactorWeightFst<GallicArc<Arc, GALLIC_LEFT>,
GallicFactor<typename Arc::Label, Weight, GALLIC_LEFT>>
fwfst(gfst);
std::unique_ptr<SymbolTable> osyms(
fst->OutputSymbols() ? fst->OutputSymbols()->Copy() : nullptr);
ArcMap(fwfst, fst, FromGallicMapper<Arc, GALLIC_LEFT>());
fst->SetOutputSymbols(osyms.get());
} else {
sfst->SetOutputSymbols(fst->OutputSymbols());
GallicToNewSymbolsMapper<Arc, GALLIC_LEFT> mapper(sfst);
ArcMap(gfst, fst, &mapper);
fst->SetOutputSymbols(sfst->InputSymbols());
}
} else if (props & kWeighted) { // Weighted acceptor.
Push(fst, REWEIGHT_TO_INITIAL, delta);
ArcMap(fst, QuantizeMapper<Arc>(delta));
EncodeMapper<Arc> encoder(kEncodeLabels | kEncodeWeights, ENCODE);
Encode(fst, &encoder);
internal::AcceptorMinimize(fst, allow_acyclic_minimization);
Decode(fst, encoder);
} else { // Unweighted acceptor.
internal::AcceptorMinimize(fst, allow_acyclic_minimization);
}
}
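// Example (editor's sketch, not part of the original header): minimizing a
// deterministic StdArc FST in place with the default quantization delta:
//
//   StdVectorFst fst;
//   // ... build a deterministic fst ...
//   Minimize(&fst);
//
// For a non-deterministic unweighted acceptor (the tropical semiring is
// idempotent), non-determinism must be allowed explicitly:
//
//   Minimize<StdArc>(&fst, nullptr, kShortestDelta, /* allow_nondet= */ true);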
} // namespace fst
#endif // FST_MINIMIZE_H_
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/native_client/multistrap_armbian64_buster.conf | [General]
arch=arm64
noauth=false
unpack=true
debootstrap=Debian
aptsources=Debian
cleanup=true
[Debian]
packages=apt libc6 libc6-dev libstdc++-7-dev linux-libc-dev libffi-dev libpython3.7-dev libsox-dev python3-numpy python3-setuptools
source=http://deb.debian.org/debian
keyring=debian-archive-keyring
components=main
suite=buster
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstrandgen.cc | #include <unistd.h>
#include <climits>
#include <ctime>
#include <fst/flags.h>
DEFINE_int32(max_length, INT32_MAX, "Maximum path length");
DEFINE_int32(npath, 1, "Number of paths to generate");
DEFINE_int32(seed, time(nullptr) + getpid(), "Random seed");
DEFINE_string(select, "uniform",
"Selection type: one of: "
" \"uniform\", \"log_prob\" (when appropriate),"
" \"fast_log_prob\" (when appropriate)");
DEFINE_bool(weighted, false,
"Output tree weighted by path count vs. unweighted paths");
DEFINE_bool(remove_total_weight, false,
"Remove total weight when output weighted");
int fstrandgen_main(int argc, char **argv);
int main(int argc, char **argv) { return fstrandgen_main(argc, argv); }
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/ngram/CMakeLists.txt | file(GLOB HEADER_FILES ../../include/fst/extensions/ngram/*.h)
message(STATUS "${HEADER_FILES}")
add_library(fstngram
bitmap-index.cc
ngram-fst.cc
nthbit.cc
${HEADER_FILES}
)
target_link_libraries(fstngram
fst
)
set_target_properties(fstngram PROPERTIES
SOVERSION "${SOVERSION}"
FOLDER ngram
)
install(TARGETS fstngram
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
RUNTIME DESTINATION lib
)
add_library(ngram_fst MODULE
bitmap-index.cc
ngram-fst.cc
nthbit.cc
)
set_target_properties(ngram_fst PROPERTIES
WINDOWS_EXPORT_ALL_SYMBOLS true
FOLDER ngram/modules
)
target_link_libraries(ngram_fst
fst
)
install(TARGETS ngram_fst
LIBRARY DESTINATION lib/fst
)
| 0 |
coqui_public_repos/STT/native_client/kenlm/util | coqui_public_repos/STT/native_client/kenlm/util/double-conversion/fast-dtoa.h | // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_
#define DOUBLE_CONVERSION_FAST_DTOA_H_
#include "utils.h"
namespace kenlm_double_conversion {
enum FastDtoaMode {
// Computes the shortest representation of the given input. The returned
// result will be the most accurate number of this length. Longer
// representations might be more accurate.
FAST_DTOA_SHORTEST,
// Same as FAST_DTOA_SHORTEST but for single-precision floats.
FAST_DTOA_SHORTEST_SINGLE,
// Computes a representation where the precision (number of digits) is
// given as input. The precision is independent of the decimal point.
FAST_DTOA_PRECISION
};
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
static const int kFastDtoaMaximalLength = 17;
// Same for single-precision numbers.
static const int kFastDtoaMaximalSingleLength = 9;
// Provides a decimal representation of v.
// The result should be interpreted as buffer * 10^(point - length).
//
// Precondition:
// * v must be a strictly positive finite double.
//
// Returns true if it succeeds; otherwise the result cannot be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
// If the function returns true and mode equals
// - FAST_DTOA_SHORTEST, then
// the parameter requested_digits is ignored.
// The result satisfies
// v == (double) (buffer * 10^(point - length)).
// The digits in the buffer are the shortest representation possible. E.g.
// if 0.099999999999 and 0.1 represent the same double then "1" is returned
// with point = 0.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the buffer will contain
// the one closest to v.
// - FAST_DTOA_PRECISION, then
// the buffer contains requested_digits digits.
// the difference v - (buffer * 10^(point-length)) is closest to zero for
// all possible representations of requested_digits digits.
// If there are two values that are equally close, then FastDtoa returns
// false.
// For both modes the buffer must be large enough to hold the result.
bool FastDtoa(double d,
FastDtoaMode mode,
int requested_digits,
Vector<char> buffer,
int* length,
int* decimal_point);
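// Example (editor's sketch, not part of the original header): producing the
// shortest decimal representation of a double:
//
//   char digits[kFastDtoaMaximalLength + 1];
//   Vector<char> buffer(digits, kFastDtoaMaximalLength + 1);
//   int length, point;
//   if (FastDtoa(0.25, FAST_DTOA_SHORTEST, 0, buffer, &length, &point)) {
//     // digits holds "25" with length == 2 and point == 0,
//     // i.e. 0.25 == 25 * 10^(point - length).
//   }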
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_FAST_DTOA_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/compact/Makefile.in | # Makefile.in generated by automake 1.15.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2017 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = { \
if test -z '$(MAKELEVEL)'; then \
false; \
elif test -n '$(MAKE_HOST)'; then \
true; \
elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
true; \
else \
false; \
fi; \
}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = src/extensions/compact
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libfstdir)"
LTLIBRARIES = $(lib_LTLIBRARIES) $(libfst_LTLIBRARIES)
compact16_acceptor_fst_la_LIBADD =
am_compact16_acceptor_fst_la_OBJECTS = compact16_acceptor-fst.lo
compact16_acceptor_fst_la_OBJECTS = \
$(am_compact16_acceptor_fst_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
compact16_acceptor_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact16_acceptor_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact16_string_fst_la_LIBADD =
am_compact16_string_fst_la_OBJECTS = compact16_string-fst.lo
compact16_string_fst_la_OBJECTS = \
$(am_compact16_string_fst_la_OBJECTS)
compact16_string_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(compact16_string_fst_la_LDFLAGS) \
$(LDFLAGS) -o $@
compact16_unweighted_fst_la_LIBADD =
am_compact16_unweighted_fst_la_OBJECTS = compact16_unweighted-fst.lo
compact16_unweighted_fst_la_OBJECTS = \
$(am_compact16_unweighted_fst_la_OBJECTS)
compact16_unweighted_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact16_unweighted_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact16_unweighted_acceptor_fst_la_LIBADD =
am_compact16_unweighted_acceptor_fst_la_OBJECTS = \
compact16_unweighted_acceptor-fst.lo
compact16_unweighted_acceptor_fst_la_OBJECTS = \
$(am_compact16_unweighted_acceptor_fst_la_OBJECTS)
compact16_unweighted_acceptor_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) \
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
$(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact16_unweighted_acceptor_fst_la_LDFLAGS) $(LDFLAGS) -o \
$@
compact16_weighted_string_fst_la_LIBADD =
am_compact16_weighted_string_fst_la_OBJECTS = \
compact16_weighted_string-fst.lo
compact16_weighted_string_fst_la_OBJECTS = \
$(am_compact16_weighted_string_fst_la_OBJECTS)
compact16_weighted_string_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) \
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
$(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact16_weighted_string_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact64_acceptor_fst_la_LIBADD =
am_compact64_acceptor_fst_la_OBJECTS = compact64_acceptor-fst.lo
compact64_acceptor_fst_la_OBJECTS = \
$(am_compact64_acceptor_fst_la_OBJECTS)
compact64_acceptor_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact64_acceptor_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact64_string_fst_la_LIBADD =
am_compact64_string_fst_la_OBJECTS = compact64_string-fst.lo
compact64_string_fst_la_OBJECTS = \
$(am_compact64_string_fst_la_OBJECTS)
compact64_string_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(compact64_string_fst_la_LDFLAGS) \
$(LDFLAGS) -o $@
compact64_unweighted_fst_la_LIBADD =
am_compact64_unweighted_fst_la_OBJECTS = compact64_unweighted-fst.lo
compact64_unweighted_fst_la_OBJECTS = \
$(am_compact64_unweighted_fst_la_OBJECTS)
compact64_unweighted_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact64_unweighted_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact64_unweighted_acceptor_fst_la_LIBADD =
am_compact64_unweighted_acceptor_fst_la_OBJECTS = \
compact64_unweighted_acceptor-fst.lo
compact64_unweighted_acceptor_fst_la_OBJECTS = \
$(am_compact64_unweighted_acceptor_fst_la_OBJECTS)
compact64_unweighted_acceptor_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) \
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
$(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact64_unweighted_acceptor_fst_la_LDFLAGS) $(LDFLAGS) -o \
$@
compact64_weighted_string_fst_la_LIBADD =
am_compact64_weighted_string_fst_la_OBJECTS = \
compact64_weighted_string-fst.lo
compact64_weighted_string_fst_la_OBJECTS = \
$(am_compact64_weighted_string_fst_la_OBJECTS)
compact64_weighted_string_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) \
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
$(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact64_weighted_string_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact8_acceptor_fst_la_LIBADD =
am_compact8_acceptor_fst_la_OBJECTS = compact8_acceptor-fst.lo
compact8_acceptor_fst_la_OBJECTS = \
$(am_compact8_acceptor_fst_la_OBJECTS)
compact8_acceptor_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(compact8_acceptor_fst_la_LDFLAGS) \
$(LDFLAGS) -o $@
compact8_string_fst_la_LIBADD =
am_compact8_string_fst_la_OBJECTS = compact8_string-fst.lo
compact8_string_fst_la_OBJECTS = $(am_compact8_string_fst_la_OBJECTS)
compact8_string_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(compact8_string_fst_la_LDFLAGS) \
$(LDFLAGS) -o $@
compact8_unweighted_fst_la_LIBADD =
am_compact8_unweighted_fst_la_OBJECTS = compact8_unweighted-fst.lo
compact8_unweighted_fst_la_OBJECTS = \
$(am_compact8_unweighted_fst_la_OBJECTS)
compact8_unweighted_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact8_unweighted_fst_la_LDFLAGS) $(LDFLAGS) -o $@
compact8_unweighted_acceptor_fst_la_LIBADD =
am_compact8_unweighted_acceptor_fst_la_OBJECTS = \
compact8_unweighted_acceptor-fst.lo
compact8_unweighted_acceptor_fst_la_OBJECTS = \
$(am_compact8_unweighted_acceptor_fst_la_OBJECTS)
compact8_unweighted_acceptor_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) \
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \
$(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact8_unweighted_acceptor_fst_la_LDFLAGS) $(LDFLAGS) -o \
$@
compact8_weighted_string_fst_la_LIBADD =
am_compact8_weighted_string_fst_la_OBJECTS = \
compact8_weighted_string-fst.lo
compact8_weighted_string_fst_la_OBJECTS = \
$(am_compact8_weighted_string_fst_la_OBJECTS)
compact8_weighted_string_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) \
$(compact8_weighted_string_fst_la_LDFLAGS) $(LDFLAGS) -o $@
am__DEPENDENCIES_1 =
libfstcompact_la_DEPENDENCIES = ../../lib/libfst.la \
$(am__DEPENDENCIES_1)
am_libfstcompact_la_OBJECTS = compact8_acceptor-fst.lo \
compact8_string-fst.lo compact8_unweighted-fst.lo \
compact8_unweighted_acceptor-fst.lo \
compact8_weighted_string-fst.lo compact16_acceptor-fst.lo \
compact16_string-fst.lo compact16_unweighted-fst.lo \
compact16_unweighted_acceptor-fst.lo \
compact16_weighted_string-fst.lo compact64_acceptor-fst.lo \
compact64_string-fst.lo compact64_unweighted-fst.lo \
compact64_unweighted_acceptor-fst.lo \
compact64_weighted_string-fst.lo
libfstcompact_la_OBJECTS = $(am_libfstcompact_la_OBJECTS)
libfstcompact_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(libfstcompact_la_LDFLAGS) \
$(LDFLAGS) -o $@
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES =
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CXXFLAGS) $(CXXFLAGS)
AM_V_CXX = $(am__v_CXX_@AM_V@)
am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
am__v_CXX_0 = @echo " CXX " $@;
am__v_CXX_1 =
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
am__v_CXXLD_0 = @echo " CXXLD " $@;
am__v_CXXLD_1 =
SOURCES = $(compact16_acceptor_fst_la_SOURCES) \
$(compact16_string_fst_la_SOURCES) \
$(compact16_unweighted_fst_la_SOURCES) \
$(compact16_unweighted_acceptor_fst_la_SOURCES) \
$(compact16_weighted_string_fst_la_SOURCES) \
$(compact64_acceptor_fst_la_SOURCES) \
$(compact64_string_fst_la_SOURCES) \
$(compact64_unweighted_fst_la_SOURCES) \
$(compact64_unweighted_acceptor_fst_la_SOURCES) \
$(compact64_weighted_string_fst_la_SOURCES) \
$(compact8_acceptor_fst_la_SOURCES) \
$(compact8_string_fst_la_SOURCES) \
$(compact8_unweighted_fst_la_SOURCES) \
$(compact8_unweighted_acceptor_fst_la_SOURCES) \
$(compact8_weighted_string_fst_la_SOURCES) \
$(libfstcompact_la_SOURCES)
DIST_SOURCES = $(compact16_acceptor_fst_la_SOURCES) \
$(compact16_string_fst_la_SOURCES) \
$(compact16_unweighted_fst_la_SOURCES) \
$(compact16_unweighted_acceptor_fst_la_SOURCES) \
$(compact16_weighted_string_fst_la_SOURCES) \
$(compact64_acceptor_fst_la_SOURCES) \
$(compact64_string_fst_la_SOURCES) \
$(compact64_unweighted_fst_la_SOURCES) \
$(compact64_unweighted_acceptor_fst_la_SOURCES) \
$(compact64_weighted_string_fst_la_SOURCES) \
$(compact8_acceptor_fst_la_SOURCES) \
$(compact8_string_fst_la_SOURCES) \
$(compact8_unweighted_fst_la_SOURCES) \
$(compact8_unweighted_acceptor_fst_la_SOURCES) \
$(compact8_weighted_string_fst_la_SOURCES) \
$(libfstcompact_la_SOURCES)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
runstatedir = @runstatedir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS)
libfst_LTLIBRARIES = compact8_acceptor-fst.la compact8_string-fst.la compact8_unweighted-fst.la compact8_unweighted_acceptor-fst.la compact8_weighted_string-fst.la compact16_acceptor-fst.la compact16_string-fst.la compact16_unweighted-fst.la compact16_unweighted_acceptor-fst.la compact16_weighted_string-fst.la compact64_acceptor-fst.la compact64_string-fst.la compact64_unweighted-fst.la compact64_unweighted_acceptor-fst.la compact64_weighted_string-fst.la
lib_LTLIBRARIES = libfstcompact.la
libfstcompact_la_SOURCES = compact8_acceptor-fst.cc compact8_string-fst.cc compact8_unweighted-fst.cc compact8_unweighted_acceptor-fst.cc compact8_weighted_string-fst.cc compact16_acceptor-fst.cc compact16_string-fst.cc compact16_unweighted-fst.cc compact16_unweighted_acceptor-fst.cc compact16_weighted_string-fst.cc compact64_acceptor-fst.cc compact64_string-fst.cc compact64_unweighted-fst.cc compact64_unweighted_acceptor-fst.cc compact64_weighted_string-fst.cc
libfstcompact_la_LDFLAGS = -version-info 13:0:0
libfstcompact_la_LIBADD = ../../lib/libfst.la -lm $(DL_LIBS)
compact8_acceptor_fst_la_SOURCES = compact8_acceptor-fst.cc
compact8_acceptor_fst_la_LDFLAGS = -module
compact8_string_fst_la_SOURCES = compact8_string-fst.cc
compact8_string_fst_la_LDFLAGS = -module
compact8_unweighted_fst_la_SOURCES = compact8_unweighted-fst.cc
compact8_unweighted_fst_la_LDFLAGS = -module
compact8_unweighted_acceptor_fst_la_SOURCES = compact8_unweighted_acceptor-fst.cc
compact8_unweighted_acceptor_fst_la_LDFLAGS = -module
compact8_weighted_string_fst_la_SOURCES = compact8_weighted_string-fst.cc
compact8_weighted_string_fst_la_LDFLAGS = -module
compact16_acceptor_fst_la_SOURCES = compact16_acceptor-fst.cc
compact16_acceptor_fst_la_LDFLAGS = -module
compact16_string_fst_la_SOURCES = compact16_string-fst.cc
compact16_string_fst_la_LDFLAGS = -module
compact16_unweighted_fst_la_SOURCES = compact16_unweighted-fst.cc
compact16_unweighted_fst_la_LDFLAGS = -module
compact16_unweighted_acceptor_fst_la_SOURCES = compact16_unweighted_acceptor-fst.cc
compact16_unweighted_acceptor_fst_la_LDFLAGS = -module
compact16_weighted_string_fst_la_SOURCES = compact16_weighted_string-fst.cc
compact16_weighted_string_fst_la_LDFLAGS = -module
compact64_acceptor_fst_la_SOURCES = compact64_acceptor-fst.cc
compact64_acceptor_fst_la_LDFLAGS = -module
compact64_string_fst_la_SOURCES = compact64_string-fst.cc
compact64_string_fst_la_LDFLAGS = -module
compact64_unweighted_fst_la_SOURCES = compact64_unweighted-fst.cc
compact64_unweighted_fst_la_LDFLAGS = -module
compact64_unweighted_acceptor_fst_la_SOURCES = compact64_unweighted_acceptor-fst.cc
compact64_unweighted_acceptor_fst_la_LDFLAGS = -module
compact64_weighted_string_fst_la_SOURCES = compact64_weighted_string-fst.cc
compact64_weighted_string_fst_la_LDFLAGS = -module
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/compact/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/extensions/compact/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
}
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
done
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
@list='$(lib_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
install-libfstLTLIBRARIES: $(libfst_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(libfst_LTLIBRARIES)'; test -n "$(libfstdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libfstdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libfstdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libfstdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libfstdir)"; \
}
uninstall-libfstLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(libfst_LTLIBRARIES)'; test -n "$(libfstdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libfstdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libfstdir)/$$f"; \
done
clean-libfstLTLIBRARIES:
-test -z "$(libfst_LTLIBRARIES)" || rm -f $(libfst_LTLIBRARIES)
@list='$(libfst_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
compact16_acceptor-fst.la: $(compact16_acceptor_fst_la_OBJECTS) $(compact16_acceptor_fst_la_DEPENDENCIES) $(EXTRA_compact16_acceptor_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact16_acceptor_fst_la_LINK) -rpath $(libfstdir) $(compact16_acceptor_fst_la_OBJECTS) $(compact16_acceptor_fst_la_LIBADD) $(LIBS)
compact16_string-fst.la: $(compact16_string_fst_la_OBJECTS) $(compact16_string_fst_la_DEPENDENCIES) $(EXTRA_compact16_string_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact16_string_fst_la_LINK) -rpath $(libfstdir) $(compact16_string_fst_la_OBJECTS) $(compact16_string_fst_la_LIBADD) $(LIBS)
compact16_unweighted-fst.la: $(compact16_unweighted_fst_la_OBJECTS) $(compact16_unweighted_fst_la_DEPENDENCIES) $(EXTRA_compact16_unweighted_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact16_unweighted_fst_la_LINK) -rpath $(libfstdir) $(compact16_unweighted_fst_la_OBJECTS) $(compact16_unweighted_fst_la_LIBADD) $(LIBS)
compact16_unweighted_acceptor-fst.la: $(compact16_unweighted_acceptor_fst_la_OBJECTS) $(compact16_unweighted_acceptor_fst_la_DEPENDENCIES) $(EXTRA_compact16_unweighted_acceptor_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact16_unweighted_acceptor_fst_la_LINK) -rpath $(libfstdir) $(compact16_unweighted_acceptor_fst_la_OBJECTS) $(compact16_unweighted_acceptor_fst_la_LIBADD) $(LIBS)
compact16_weighted_string-fst.la: $(compact16_weighted_string_fst_la_OBJECTS) $(compact16_weighted_string_fst_la_DEPENDENCIES) $(EXTRA_compact16_weighted_string_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact16_weighted_string_fst_la_LINK) -rpath $(libfstdir) $(compact16_weighted_string_fst_la_OBJECTS) $(compact16_weighted_string_fst_la_LIBADD) $(LIBS)
compact64_acceptor-fst.la: $(compact64_acceptor_fst_la_OBJECTS) $(compact64_acceptor_fst_la_DEPENDENCIES) $(EXTRA_compact64_acceptor_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact64_acceptor_fst_la_LINK) -rpath $(libfstdir) $(compact64_acceptor_fst_la_OBJECTS) $(compact64_acceptor_fst_la_LIBADD) $(LIBS)
compact64_string-fst.la: $(compact64_string_fst_la_OBJECTS) $(compact64_string_fst_la_DEPENDENCIES) $(EXTRA_compact64_string_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact64_string_fst_la_LINK) -rpath $(libfstdir) $(compact64_string_fst_la_OBJECTS) $(compact64_string_fst_la_LIBADD) $(LIBS)
compact64_unweighted-fst.la: $(compact64_unweighted_fst_la_OBJECTS) $(compact64_unweighted_fst_la_DEPENDENCIES) $(EXTRA_compact64_unweighted_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact64_unweighted_fst_la_LINK) -rpath $(libfstdir) $(compact64_unweighted_fst_la_OBJECTS) $(compact64_unweighted_fst_la_LIBADD) $(LIBS)
compact64_unweighted_acceptor-fst.la: $(compact64_unweighted_acceptor_fst_la_OBJECTS) $(compact64_unweighted_acceptor_fst_la_DEPENDENCIES) $(EXTRA_compact64_unweighted_acceptor_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact64_unweighted_acceptor_fst_la_LINK) -rpath $(libfstdir) $(compact64_unweighted_acceptor_fst_la_OBJECTS) $(compact64_unweighted_acceptor_fst_la_LIBADD) $(LIBS)
compact64_weighted_string-fst.la: $(compact64_weighted_string_fst_la_OBJECTS) $(compact64_weighted_string_fst_la_DEPENDENCIES) $(EXTRA_compact64_weighted_string_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact64_weighted_string_fst_la_LINK) -rpath $(libfstdir) $(compact64_weighted_string_fst_la_OBJECTS) $(compact64_weighted_string_fst_la_LIBADD) $(LIBS)
compact8_acceptor-fst.la: $(compact8_acceptor_fst_la_OBJECTS) $(compact8_acceptor_fst_la_DEPENDENCIES) $(EXTRA_compact8_acceptor_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact8_acceptor_fst_la_LINK) -rpath $(libfstdir) $(compact8_acceptor_fst_la_OBJECTS) $(compact8_acceptor_fst_la_LIBADD) $(LIBS)
compact8_string-fst.la: $(compact8_string_fst_la_OBJECTS) $(compact8_string_fst_la_DEPENDENCIES) $(EXTRA_compact8_string_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact8_string_fst_la_LINK) -rpath $(libfstdir) $(compact8_string_fst_la_OBJECTS) $(compact8_string_fst_la_LIBADD) $(LIBS)
compact8_unweighted-fst.la: $(compact8_unweighted_fst_la_OBJECTS) $(compact8_unweighted_fst_la_DEPENDENCIES) $(EXTRA_compact8_unweighted_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact8_unweighted_fst_la_LINK) -rpath $(libfstdir) $(compact8_unweighted_fst_la_OBJECTS) $(compact8_unweighted_fst_la_LIBADD) $(LIBS)
compact8_unweighted_acceptor-fst.la: $(compact8_unweighted_acceptor_fst_la_OBJECTS) $(compact8_unweighted_acceptor_fst_la_DEPENDENCIES) $(EXTRA_compact8_unweighted_acceptor_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact8_unweighted_acceptor_fst_la_LINK) -rpath $(libfstdir) $(compact8_unweighted_acceptor_fst_la_OBJECTS) $(compact8_unweighted_acceptor_fst_la_LIBADD) $(LIBS)
compact8_weighted_string-fst.la: $(compact8_weighted_string_fst_la_OBJECTS) $(compact8_weighted_string_fst_la_DEPENDENCIES) $(EXTRA_compact8_weighted_string_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(compact8_weighted_string_fst_la_LINK) -rpath $(libfstdir) $(compact8_weighted_string_fst_la_OBJECTS) $(compact8_weighted_string_fst_la_LIBADD) $(LIBS)
libfstcompact.la: $(libfstcompact_la_OBJECTS) $(libfstcompact_la_DEPENDENCIES) $(EXTRA_libfstcompact_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libfstcompact_la_LINK) -rpath $(libdir) $(libfstcompact_la_OBJECTS) $(libfstcompact_la_LIBADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact16_acceptor-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact16_string-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact16_unweighted-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact16_unweighted_acceptor-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact16_weighted_string-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact64_acceptor-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact64_string-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact64_unweighted-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact64_unweighted_acceptor-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact64_weighted_string-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact8_acceptor-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact8_string-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact8_unweighted-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact8_unweighted_acceptor-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compact8_weighted_string-fst.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
.cc.obj:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.cc.lo:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES)
installdirs:
for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libfstdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic clean-libLTLIBRARIES clean-libfstLTLIBRARIES \
clean-libtool mostlyclean-am
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am: install-libfstLTLIBRARIES
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am: install-libLTLIBRARIES
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-libLTLIBRARIES uninstall-libfstLTLIBRARIES
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
clean-libLTLIBRARIES clean-libfstLTLIBRARIES clean-libtool \
cscopelist-am ctags ctags-am distclean distclean-compile \
distclean-generic distclean-libtool distclean-tags distdir dvi \
dvi-am html html-am info info-am install install-am \
install-data install-data-am install-dvi install-dvi-am \
install-exec install-exec-am install-html install-html-am \
install-info install-info-am install-libLTLIBRARIES \
install-libfstLTLIBRARIES install-man install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES \
uninstall-libfstLTLIBRARIES
.PRECIOUS: Makefile
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| 0 |
coqui_public_repos/STT-examples | coqui_public_repos/STT-examples/batch_processing/setup.ps1 | $env:Path += ";C:\Users\jmike\Downloads\cudnn-10.0-windows10-x64-v7.5.1.10\cuda\bin"
$env:Path += ";$env:userprofile\Downloads\TensorRT-5.1.5.0.Windows10.x86_64.cuda-10.0.cudnn7.5\TensorRT-5.1.5.0\lib"
$env:Path += ";C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin"
$env:Path += ";c:\tools\msys64\usr\bin\"
$env:Path += ";C:\Program Files (x86)\Dr. Memory\bin\"
| 0 |
coqui_public_repos/STT-models/french/commonvoice-fr | coqui_public_repos/STT-models/french/commonvoice-fr/v0.8/LICENSE | Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/ci_scripts/tf-package.sh | #!/bin/bash
set -xe
source $(dirname $0)/tf-vars.sh
mkdir -p ${CI_ARTIFACTS_DIR} || true
OUTPUT_ROOT="${DS_ROOT_TASK}/tensorflow/bazel-bin"
for output_bin in \
tensorflow/lite/libtensorflow.so \
tensorflow/lite/libtensorflow.so.if.lib \
;
do
if [ -f "${OUTPUT_ROOT}/${output_bin}" ]; then
cp ${OUTPUT_ROOT}/${output_bin} ${CI_ARTIFACTS_DIR}/
fi;
done
# Make a tar of bazel caches
RELATIVE_CACHE_ROOT=$(realpath --relative-to="${DS_ROOT_TASK}" ${BAZEL_CACHE_ROOT})
if [ "${OS}" = "${CI_MSYS_VERSION}" ]; then
export PATH=$PATH:'/c/Program Files/7-Zip/'
pushd ${DS_ROOT_TASK}
7z a -snl -snh -so home.tar ${RELATIVE_CACHE_ROOT} | 7z a -si ${CI_ARTIFACTS_DIR}/home.tar.xz
popd
else
${TAR} -C ${DS_ROOT_TASK} -cf - ${RELATIVE_CACHE_ROOT} | ${XZ} > ${CI_ARTIFACTS_DIR}/home.tar.xz
fi
if [ "${OS}" = "Linux" ]; then
SHA_SUM_GEN="sha256sum"
elif [ "${OS}" = "${CI_MSYS_VERSION}" ]; then
SHA_SUM_GEN="sha256sum"
elif [ "${OS}" = "Darwin" ]; then
SHA_SUM_GEN="shasum -a 256"
fi;
${SHA_SUM_GEN} ${CI_ARTIFACTS_DIR}/* > ${CI_ARTIFACTS_DIR}/checksums.txt
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/pdt/pdt.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Common classes for PDT expansion/traversal.
#ifndef FST_EXTENSIONS_PDT_PDT_H_
#define FST_EXTENSIONS_PDT_PDT_H_
#include <map>
#include <set>
#include <unordered_map>
#include <fst/compat.h>
#include <fst/log.h>
#include <fst/fst.h>
#include <fst/state-table.h>
namespace fst {
// Provides bijection between parenthesis stacks and signed integral stack IDs.
// Each stack ID is unique to each distinct stack. The open-close parenthesis
// label pairs are passed using the parens argument.
template <typename StackId, typename Label>
class PdtStack {
public:
// The stacks are stored in a tree. The nodes are stored in a vector. Each
// node represents the top of some stack and is identified by its position in
// the vector. Its parent node represents the stack with the top popped and
// its children are stored in child_map_ and accessed by stack_id and label.
// The paren_id is the position in parens of the parenthesis for that node.
struct StackNode {
StackId parent_id;
size_t paren_id;
StackNode(StackId p, size_t i) : parent_id(p), paren_id(i) {}
};
explicit PdtStack(const std::vector<std::pair<Label, Label>> &parens)
: parens_(parens), min_paren_(kNoLabel), max_paren_(kNoLabel) {
for (size_t i = 0; i < parens.size(); ++i) {
const auto &pair = parens[i];
paren_map_[pair.first] = i;
paren_map_[pair.second] = i;
if (min_paren_ == kNoLabel || pair.first < min_paren_) {
min_paren_ = pair.first;
}
if (pair.second < min_paren_) min_paren_ = pair.second;
if (max_paren_ == kNoLabel || pair.first > max_paren_) {
max_paren_ = pair.first;
}
if (pair.second > max_paren_) max_paren_ = pair.second;
}
nodes_.push_back(StackNode(-1, -1)); // Tree root.
}
// Returns stack ID given the current stack ID (0 if empty) and label read.
// Pushes onto the stack if the label is an open parenthesis, returning the
// new stack ID. Pops the stack if the label is a close parenthesis that
// matches the top of the stack, returning the parent stack ID. Returns -1 if
// label is an unmatched close parenthesis. Otherwise, returns the current
// stack ID.
StackId Find(StackId stack_id, Label label) {
if (min_paren_ == kNoLabel || label < min_paren_ || label > max_paren_) {
return stack_id; // Non-paren.
}
const auto it = paren_map_.find(label);
// Non-paren.
if (it == paren_map_.end()) return stack_id;
const auto paren_id = it->second;
// Open paren.
if (label == parens_[paren_id].first) {
auto &child_id = child_map_[std::make_pair(stack_id, label)];
if (child_id == 0) { // Child not found; pushes label.
child_id = nodes_.size();
nodes_.push_back(StackNode(stack_id, paren_id));
}
return child_id;
}
const auto &node = nodes_[stack_id];
// Matching close paren.
if (paren_id == node.paren_id) return node.parent_id;
// Non-matching close paren.
return -1;
}
// Returns the stack ID obtained by popping the label at the top of the
// current stack ID.
StackId Pop(StackId stack_id) const { return nodes_[stack_id].parent_id; }
// Returns the paren ID at the top of the stack.
std::ptrdiff_t Top(StackId stack_id) const { return nodes_[stack_id].paren_id; }
std::ptrdiff_t ParenId(Label label) const {
const auto it = paren_map_.find(label);
if (it == paren_map_.end()) return -1; // Non-paren.
return it->second;
}
private:
struct ChildHash {
size_t operator()(const std::pair<StackId, Label> &pair) const {
static constexpr size_t prime = 7853;
return static_cast<size_t>(pair.first) +
static_cast<size_t>(pair.second) * prime;
}
};
std::vector<std::pair<Label, Label>> parens_;
std::vector<StackNode> nodes_;
std::unordered_map<Label, size_t> paren_map_;
// Child of a stack node with respect to a label.
std::unordered_map<std::pair<StackId, Label>, StackId, ChildHash> child_map_;
Label min_paren_;
Label max_paren_;
};
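// A minimal usage sketch (illustrative only, not part of the library). It
// assumes int stack IDs/labels and a single parenthesis pair in which label 1
// is the open paren and label 2 its matching close paren:
//
// std::vector<std::pair<int, int>> parens = {{1, 2}};
// PdtStack<int, int> stack(parens);
// int s1 = stack.Find(0, 1); // Open paren: pushes; returns a new stack ID.
// int s2 = stack.Find(s1, 2); // Matching close paren: pops; returns 0.
// int s3 = stack.Find(0, 2); // Unmatched close paren: returns -1.
// int s4 = stack.Find(0, 3); // Non-paren label: returns the input ID, 0.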
// State tuple for PDT expansion.
template <typename S, typename K>
struct PdtStateTuple {
using StateId = S;
using StackId = K;
StateId state_id;
StackId stack_id;
PdtStateTuple(StateId state_id = kNoStateId, StackId stack_id = -1)
: state_id(state_id), stack_id(stack_id) {}
};
// Equality of PDT state tuples.
template <typename S, typename K>
inline bool operator==(const PdtStateTuple<S, K> &x,
const PdtStateTuple<S, K> &y) {
if (&x == &y) return true;
return x.state_id == y.state_id && x.stack_id == y.stack_id;
}
// Hash function object for PDT state tuples
template <class T>
class PdtStateHash {
public:
size_t operator()(const T &tuple) const {
static constexpr auto prime = 7853;
return tuple.state_id + tuple.stack_id * prime;
}
};
// Tuple to PDT state bijection.
template <class StateId, class StackId>
class PdtStateTable : public CompactHashStateTable<
PdtStateTuple<StateId, StackId>,
PdtStateHash<PdtStateTuple<StateId, StackId>>> {
public:
PdtStateTable() {}
PdtStateTable(const PdtStateTable &other) {}
private:
PdtStateTable &operator=(const PdtStateTable &) = delete;
};
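// Hedged sketch of interning a (state, stack) tuple and reading it back
// (illustrative; FindState/Tuple are inherited from CompactHashStateTable):
//
// PdtStateTable<int, int> table;
// int id = table.FindState(PdtStateTuple<int, int>(4, 7));
// const PdtStateTuple<int, int> &t = table.Tuple(id); // t.state_id == 4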
} // namespace fst
#endif // FST_EXTENSIONS_PDT_PDT_H_
| 0 |
coqui_public_repos/STT/native_client/dotnet/STTClient | coqui_public_repos/STT/native_client/dotnet/STTClient/Structs/TokenMetadata.cs | using System;
using System.Runtime.InteropServices;
namespace STTClient.Structs
{
[StructLayout(LayoutKind.Sequential)]
internal unsafe struct TokenMetadata
{
/// <summary>
/// Native text.
/// </summary>
internal unsafe IntPtr text;
/// <summary>
/// Position of the character in units of 20ms.
/// </summary>
internal unsafe int timestep;
/// <summary>
/// Position of the character in seconds.
/// </summary>
internal unsafe float start_time;
}
}
| 0 |
coqui_public_repos/STT/native_client/kenlm/lm | coqui_public_repos/STT/native_client/kenlm/lm/interpolate/tune_weights.hh | #ifndef LM_INTERPOLATE_TUNE_WEIGHTS_H
#define LM_INTERPOLATE_TUNE_WEIGHTS_H
#include "../../util/string_piece.hh"
#include <vector>
namespace lm { namespace interpolate {
struct InstancesConfig;
// Run a tuning loop, producing weights as output.
void TuneWeights(int tune_file, const std::vector<StringPiece> &model_names, const InstancesConfig &config, std::vector<float> &weights);
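// Hedged usage sketch (illustrative only; the file name and config setup are
// hypothetical, and InstancesConfig's fields are defined elsewhere):
//
// int tune_fd = util::OpenReadOrThrow("dev.txt"); // tuning-set text
// std::vector<StringPiece> models = {"a.arpa", "b.arpa"};
// InstancesConfig config; // populated by the caller
// std::vector<float> weights;
// TuneWeights(tune_fd, models, config, weights);
// // weights now holds one interpolation weight per input model.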
}} // namespaces
#endif // LM_INTERPOLATE_TUNE_WEIGHTS_H
| 0 |
coqui_public_repos/STT-models/chuvash/itml | coqui_public_repos/STT-models/chuvash/itml/v0.1.1/LICENSE | GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
| 0 |
coqui_public_repos/STT/native_client/dotnet/STTClient | coqui_public_repos/STT/native_client/dotnet/STTClient/Structs/Metadata.cs | using System;
using System.Runtime.InteropServices;
namespace STTClient.Structs
{
[StructLayout(LayoutKind.Sequential)]
internal unsafe struct Metadata
{
/// <summary>
/// Native list of candidate transcripts.
/// </summary>
internal IntPtr transcripts;
/// <summary>
/// Count of transcripts from the native side.
/// </summary>
internal int num_transcripts;
}
}
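// Usage sketch (illustrative only; `nativeMetadataPtr` and `CandidateTranscript`
// are assumptions, not part of this file). It shows how the native transcript
// array could be walked from managed code:
//
// var metadata = Marshal.PtrToStructure<Metadata>(nativeMetadataPtr);
// for (int i = 0; i < metadata.num_transcripts; i++)
// {
// IntPtr element = IntPtr.Add(metadata.transcripts,
// i * Marshal.SizeOf<CandidateTranscript>());
// var transcript = Marshal.PtrToStructure<CandidateTranscript>(element);
// }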
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstcompose.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_string(compose_filter, "auto",
"Composition filter, one of: \"alt_sequence\", \"auto\", "
"\"match\", \"null\", \"sequence\", \"trivial\"");
DEFINE_bool(connect, true, "Trim output");
int fstcompose_main(int argc, char **argv);
int main(int argc, char **argv) { return fstcompose_main(argc, argv); }
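// Shell usage sketch (assuming the standard OpenFst binary layout, where
// fstcompose_main parses positional FST arguments; file names are placeholders):
//
// fstcompose --compose_filter=sequence --connect a.fst b.fst composed.fst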
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/collection.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to store a collection of ordered (multi-)sets with elements of type T.
#ifndef FST_EXTENSIONS_PDT_COLLECTION_H_
#define FST_EXTENSIONS_PDT_COLLECTION_H_
#include <functional>
#include <vector>
#include <fst/log.h>
#include <fst/bi-table.h>
namespace fst {
// Stores a collection of non-empty, ordered (multi-)sets with elements of type
// T. A default constructor, operator==, and an STL-style hash functor must be
// defined on the elements. Provides signed integer ID (of type I) for each
// unique set. The IDs are allocated starting from 0 in order.
template <class I, class T>
class Collection {
public:
struct Node { // Trie node.
I node_id; // Root is kNoNodeId;
T element;
Node() : node_id(kNoNodeId), element(T()) {}
Node(I i, const T &t) : node_id(i), element(t) {}
bool operator==(const Node &n) const {
return n.node_id == node_id && n.element == element;
}
};
struct NodeHash {
size_t operator()(const Node &n) const {
static constexpr auto kPrime = 7853;
return n.node_id + hash_(n.element) * kPrime;
}
};
using NodeTable = CompactHashBiTable<I, Node, NodeHash>;
class SetIterator {
public:
SetIterator(I id, Node node, NodeTable *node_table)
: id_(id), node_(node), node_table_(node_table) {}
bool Done() const { return id_ == kNoNodeId; }
const T &Element() const { return node_.element; }
void Next() {
id_ = node_.node_id;
if (id_ != kNoNodeId) node_ = node_table_->FindEntry(id_);
}
private:
I id_; // Iterator set node ID.
Node node_; // Iterator set node.
NodeTable *node_table_;
};
Collection() {}
// Looks up the integer ID of an ordered (multi-)set; if it doesn't exist and
// insert is true, adds it. Otherwise returns -1.
I FindId(const std::vector<T> &set, bool insert = true) {
I node_id = kNoNodeId;
for (std::ptrdiff_t i = set.size() - 1; i >= 0; --i) {
Node node(node_id, set[i]);
node_id = node_table_.FindId(node, insert);
if (node_id == -1) break;
}
return node_id;
}
// Finds ordered (multi-)set given integer ID. Returns set iterator to
// traverse result.
SetIterator FindSet(I id) {
if (id < 0 || id >= node_table_.Size()) {
return SetIterator(kNoNodeId, Node(kNoNodeId, T()), &node_table_);
} else {
return SetIterator(id, node_table_.FindEntry(id), &node_table_);
}
}
I Size() const { return node_table_.Size(); }
private:
static constexpr I kNoNodeId = -1;
static const std::hash<T> hash_;
NodeTable node_table_;
};
template <class I, class T>
constexpr I Collection<I, T>::kNoNodeId;
template <class I, class T>
const std::hash<T> Collection<I, T>::hash_ = {};
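// Usage sketch (illustrative, not part of the original header):
//
// Collection<int64, int> sets;
// const int64 id = sets.FindId({1, 2, 3}); // Interns the set, returns its ID.
// for (auto iter = sets.FindSet(id); !iter.Done(); iter.Next()) {
// const int element = iter.Element(); // Visits 1, 2, 3 in order.
// }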
} // namespace fst
#endif // FST_EXTENSIONS_PDT_COLLECTION_H_
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/platform/ort_mutex.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#ifdef _WIN32
#include <Windows.h>
#include <mutex>
namespace onnxruntime {
// Q: Why is OrtMutex better than std::mutex?
// A: OrtMutex supports static initialization but std::mutex doesn't. Static
// initialization helps us prevent the "static initialization order problem".
// Q: Why can't std::mutex provide this?
// A: The VC runtime has to support Windows XP at the ABI level, but we have no
// such requirement.
// Q: Is OrtMutex faster than std::mutex?
// A: Yes. It wraps a slim reader/writer (SRW) lock, which is typically
// lighter-weight than the std::mutex implementation in the VC runtime.
class OrtMutex {
private:
SRWLOCK data_ = SRWLOCK_INIT;
public:
constexpr OrtMutex() = default;
// SRW locks do not need to be explicitly destroyed.
~OrtMutex() = default;
OrtMutex(const OrtMutex&) = delete;
OrtMutex& operator=(const OrtMutex&) = delete;
void lock() { AcquireSRWLockExclusive(native_handle()); }
bool try_lock() noexcept { return TryAcquireSRWLockExclusive(native_handle()) == TRUE; }
void unlock() noexcept { ReleaseSRWLockExclusive(native_handle()); }
using native_handle_type = SRWLOCK*;
__forceinline native_handle_type native_handle() { return &data_; }
};
class OrtCondVar {
CONDITION_VARIABLE native_cv_object = CONDITION_VARIABLE_INIT;
public:
constexpr OrtCondVar() noexcept = default;
~OrtCondVar() = default;
OrtCondVar(const OrtCondVar&) = delete;
OrtCondVar& operator=(const OrtCondVar&) = delete;
void notify_one() noexcept { WakeConditionVariable(&native_cv_object); }
void notify_all() noexcept { WakeAllConditionVariable(&native_cv_object); }
void wait(std::unique_lock<OrtMutex>& lk) {
if (SleepConditionVariableSRW(&native_cv_object, lk.mutex()->native_handle(), INFINITE, 0) != TRUE) {
std::terminate();
}
}
template <class _Predicate>
void wait(std::unique_lock<OrtMutex>& __lk, _Predicate __pred);
/**
* returns cv_status::timeout if the wait terminates when Rel_time has elapsed. Otherwise, the method returns
* cv_status::no_timeout.
* @param cond_mutex A unique_lock<OrtMutex> object.
* @param rel_time A chrono::duration object that specifies the amount of time before the thread wakes up.
* @return returns cv_status::timeout if the wait terminates when Rel_time has elapsed. Otherwise, the method returns
* cv_status::no_timeout
*/
template <class Rep, class Period>
std::cv_status wait_for(std::unique_lock<OrtMutex>& cond_mutex, const std::chrono::duration<Rep, Period>& rel_time);
using native_handle_type = CONDITION_VARIABLE*;
native_handle_type native_handle() { return &native_cv_object; }
private:
void timed_wait_impl(std::unique_lock<OrtMutex>& __lk,
std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>);
};
template <class _Predicate>
void OrtCondVar::wait(std::unique_lock<OrtMutex>& __lk, _Predicate __pred) {
while (!__pred()) wait(__lk);
}
template <class Rep, class Period>
std::cv_status OrtCondVar::wait_for(std::unique_lock<OrtMutex>& cond_mutex,
const std::chrono::duration<Rep, Period>& rel_time) {
// TODO: is it possible to use nsync_from_time_point_ ?
using namespace std::chrono;
if (rel_time <= duration<Rep, Period>::zero())
return std::cv_status::timeout;
using SystemTimePointFloat = time_point<system_clock, duration<long double, std::nano> >;
using SystemTimePoint = time_point<system_clock, nanoseconds>;
SystemTimePointFloat max_time = SystemTimePoint::max();
steady_clock::time_point steady_now = steady_clock::now();
system_clock::time_point system_now = system_clock::now();
if (max_time - rel_time > system_now) {
nanoseconds remain = duration_cast<nanoseconds>(rel_time);
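// duration_cast truncates toward zero; round up by one tick so we never
// wait for less than rel_time.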
if (remain < rel_time)
++remain;
timed_wait_impl(cond_mutex, system_now + remain);
} else
timed_wait_impl(cond_mutex, SystemTimePoint::max());
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
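// Usage sketch (illustrative, not part of the original header): a bounded wait
// on a flag guarded by OrtMutex.
//
// OrtMutex mu;
// OrtCondVar cv;
// bool ready = false;
//
// std::unique_lock<OrtMutex> lock(mu);
// while (!ready) {
// if (cv.wait_for(lock, std::chrono::milliseconds(50)) ==
// std::cv_status::timeout) {
// break; // Timed out before another thread set `ready`.
// }
// }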
} // namespace onnxruntime
#else
#include "nsync.h"
#include <mutex> //for unique_lock
#include <condition_variable> //for cv_status
namespace onnxruntime {
class OrtMutex {
nsync::nsync_mu data_ = NSYNC_MU_INIT;
public:
constexpr OrtMutex() = default;
~OrtMutex() = default;
OrtMutex(const OrtMutex&) = delete;
OrtMutex& operator=(const OrtMutex&) = delete;
void lock() { nsync::nsync_mu_lock(&data_); }
bool try_lock() noexcept { return nsync::nsync_mu_trylock(&data_) == 0; }
void unlock() noexcept { nsync::nsync_mu_unlock(&data_); }
using native_handle_type = nsync::nsync_mu*;
native_handle_type native_handle() { return &data_; }
};
class OrtCondVar {
nsync::nsync_cv native_cv_object = NSYNC_CV_INIT;
public:
constexpr OrtCondVar() noexcept = default;
~OrtCondVar() = default;
OrtCondVar(const OrtCondVar&) = delete;
OrtCondVar& operator=(const OrtCondVar&) = delete;
void notify_one() noexcept { nsync::nsync_cv_signal(&native_cv_object); }
void notify_all() noexcept { nsync::nsync_cv_broadcast(&native_cv_object); }
void wait(std::unique_lock<OrtMutex>& lk);
template <class _Predicate>
void wait(std::unique_lock<OrtMutex>& __lk, _Predicate __pred);
/**
* returns cv_status::timeout if the wait terminates when Rel_time has elapsed. Otherwise, the method returns
* cv_status::no_timeout.
* @param cond_mutex A unique_lock<OrtMutex> object.
* @param rel_time A chrono::duration object that specifies the amount of time before the thread wakes up.
* @return returns cv_status::timeout if the wait terminates when Rel_time has elapsed. Otherwise, the method returns
* cv_status::no_timeout
*/
template <class Rep, class Period>
std::cv_status wait_for(std::unique_lock<OrtMutex>& cond_mutex, const std::chrono::duration<Rep, Period>& rel_time);
using native_handle_type = nsync::nsync_cv*;
native_handle_type native_handle() { return &native_cv_object; }
private:
void timed_wait_impl(std::unique_lock<OrtMutex>& __lk,
std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>);
};
template <class _Predicate>
void OrtCondVar::wait(std::unique_lock<OrtMutex>& __lk, _Predicate __pred) {
while (!__pred()) wait(__lk);
}
template <class Rep, class Period>
std::cv_status OrtCondVar::wait_for(std::unique_lock<OrtMutex>& cond_mutex,
const std::chrono::duration<Rep, Period>& rel_time) {
// TODO: is it possible to use nsync_from_time_point_ ?
using namespace std::chrono;
if (rel_time <= duration<Rep, Period>::zero())
return std::cv_status::timeout;
using SystemTimePointFloat = time_point<system_clock, duration<long double, std::nano> >;
using SystemTimePoint = time_point<system_clock, nanoseconds>;
SystemTimePointFloat max_time = SystemTimePoint::max();
steady_clock::time_point steady_now = steady_clock::now();
system_clock::time_point system_now = system_clock::now();
if (max_time - rel_time > system_now) {
nanoseconds remain = duration_cast<nanoseconds>(rel_time);
if (remain < rel_time)
++remain;
timed_wait_impl(cond_mutex, system_now + remain);
} else
timed_wait_impl(cond_mutex, SystemTimePoint::max());
return steady_clock::now() - steady_now < rel_time ? std::cv_status::no_timeout : std::cv_status::timeout;
}
} // namespace onnxruntime
#endif
| 0 |
coqui_public_repos/STT-models/breton/itml | coqui_public_repos/STT-models/breton/itml/v0.1.0/LICENSE | GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
| 0 |
coqui_public_repos/STT-models/totonac/bozden | coqui_public_repos/STT-models/totonac/bozden/v1.0.0/LICENSE | https://creativecommons.org/licenses/by-nc-sa/3.0/
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/equivalent.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/equivalent.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
bool Equivalent(const FstClass &fst1, const FstClass &fst2, float delta) {
if (!internal::ArcTypesMatch(fst1, fst2, "Equivalent")) return false;
EquivalentInnerArgs iargs(fst1, fst2, delta);
EquivalentArgs args(iargs);
Apply<Operation<EquivalentArgs>>("Equivalent", fst1.ArcType(), &args);
return args.retval;
}
REGISTER_FST_OPERATION(Equivalent, StdArc, EquivalentArgs);
REGISTER_FST_OPERATION(Equivalent, LogArc, EquivalentArgs);
REGISTER_FST_OPERATION(Equivalent, Log64Arc, EquivalentArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/compress/Makefile.in | # Makefile.in generated by automake 1.15.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2017 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = { \
if test -z '$(MAKELEVEL)'; then \
false; \
elif test -n '$(MAKE_HOST)'; then \
true; \
elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
true; \
else \
false; \
fi; \
}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
@HAVE_BIN_TRUE@bin_PROGRAMS = fstcompress$(EXEEXT) fstrandmod$(EXEEXT)
subdir = src/extensions/compress
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"
LTLIBRARIES = $(lib_LTLIBRARIES)
am__DEPENDENCIES_1 =
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_DEPENDENCIES = \
@HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \
@HAVE_SCRIPT_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
am__libfstcompressscript_la_SOURCES_DIST = compress-script.cc
@HAVE_SCRIPT_TRUE@am_libfstcompressscript_la_OBJECTS = \
@HAVE_SCRIPT_TRUE@ compress-script.lo
libfstcompressscript_la_OBJECTS = \
$(am_libfstcompressscript_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
libfstcompressscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(libfstcompressscript_la_LDFLAGS) \
$(LDFLAGS) -o $@
@HAVE_SCRIPT_TRUE@am_libfstcompressscript_la_rpath = -rpath $(libdir)
PROGRAMS = $(bin_PROGRAMS)
am__fstcompress_SOURCES_DIST = fstcompress.cc
@HAVE_BIN_TRUE@am_fstcompress_OBJECTS = fstcompress.$(OBJEXT)
fstcompress_OBJECTS = $(am_fstcompress_OBJECTS)
fstcompress_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstcompress_DEPENDENCIES = libfstcompressscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
am__fstrandmod_SOURCES_DIST = fstrandmod.cc
@HAVE_BIN_TRUE@am_fstrandmod_OBJECTS = fstrandmod.$(OBJEXT)
fstrandmod_OBJECTS = $(am_fstrandmod_OBJECTS)
fstrandmod_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstrandmod_DEPENDENCIES = libfstcompressscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES =
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CXXFLAGS) $(CXXFLAGS)
AM_V_CXX = $(am__v_CXX_@AM_V@)
am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
am__v_CXX_0 = @echo " CXX " $@;
am__v_CXX_1 =
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
am__v_CXXLD_0 = @echo " CXXLD " $@;
am__v_CXXLD_1 =
SOURCES = $(libfstcompressscript_la_SOURCES) $(fstcompress_SOURCES) \
$(fstrandmod_SOURCES)
DIST_SOURCES = $(am__libfstcompressscript_la_SOURCES_DIST) \
$(am__fstcompress_SOURCES_DIST) $(am__fstrandmod_SOURCES_DIST)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
runstatedir = @runstatedir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS)
@HAVE_BIN_TRUE@LDADD = libfstcompressscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la \
@HAVE_BIN_TRUE@ -lm $(DL_LIBS)
@HAVE_BIN_TRUE@fstcompress_SOURCES = fstcompress.cc
@HAVE_BIN_TRUE@fstrandmod_SOURCES = fstrandmod.cc
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_SOURCES = compress-script.cc
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_LDFLAGS = -version-info 13:0:0
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_LIBADD = \
@HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \
@HAVE_SCRIPT_TRUE@ ../../lib/libfst.la -lz -lm $(DL_LIBS)
@HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstcompressscript.la
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/compress/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/extensions/compress/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
}
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
done
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
@list='$(lib_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
libfstcompressscript.la: $(libfstcompressscript_la_OBJECTS) $(libfstcompressscript_la_DEPENDENCIES) $(EXTRA_libfstcompressscript_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libfstcompressscript_la_LINK) $(am_libfstcompressscript_la_rpath) $(libfstcompressscript_la_OBJECTS) $(libfstcompressscript_la_LIBADD) $(LIBS)
install-binPROGRAMS: $(bin_PROGRAMS)
@$(NORMAL_INSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
$(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
fi; \
for p in $$list; do echo "$$p $$p"; done | \
sed 's/$(EXEEXT)$$//' | \
while read p p1; do if test -f $$p \
|| test -f $$p1 \
; then echo "$$p"; echo "$$p"; else :; fi; \
done | \
sed -e 'p;s,.*/,,;n;h' \
-e 's|.*|.|' \
-e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
{ d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
if ($$2 == $$4) files[d] = files[d] " " $$1; \
else { print "f", $$3 "/" $$4, $$1; } } \
END { for (d in files) print "f", d, files[d] }' | \
while read type dir files; do \
if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
test -z "$$files" || { \
echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
$(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
} \
; done
uninstall-binPROGRAMS:
@$(NORMAL_UNINSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
-e 's/$$/$(EXEEXT)/' \
`; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(bindir)" && rm -f $$files
clean-binPROGRAMS:
@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
echo " rm -f" $$list; \
rm -f $$list || exit $$?; \
test -n "$(EXEEXT)" || exit 0; \
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
fstcompress$(EXEEXT): $(fstcompress_OBJECTS) $(fstcompress_DEPENDENCIES) $(EXTRA_fstcompress_DEPENDENCIES)
@rm -f fstcompress$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstcompress_OBJECTS) $(fstcompress_LDADD) $(LIBS)
fstrandmod$(EXEEXT): $(fstrandmod_OBJECTS) $(fstrandmod_DEPENDENCIES) $(EXTRA_fstrandmod_DEPENDENCIES)
@rm -f fstrandmod$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstrandmod_OBJECTS) $(fstrandmod_LDADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compress-script.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstcompress.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstrandmod.Po@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
.cc.obj:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.cc.lo:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
install-binPROGRAMS: install-libLTLIBRARIES
installdirs:
for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libtool mostlyclean-am
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am:
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am: install-binPROGRAMS install-libLTLIBRARIES
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libtool cscopelist-am ctags ctags-am distclean \
distclean-compile distclean-generic distclean-libtool \
distclean-tags distdir dvi dvi-am html html-am info info-am \
install install-am install-binPROGRAMS install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-libLTLIBRARIES install-man install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \
uninstall-libLTLIBRARIES
.PRECIOUS: Makefile
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| 0 |
coqui_public_repos/STT-models/latvian/itml | coqui_public_repos/STT-models/latvian/itml/v0.1.1/MODEL_CARD.md | # Model card for Latvian STT
Jump to section:
- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)
## Model details
- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Latvian / Latviešu valoda / `lv`
- Model date: April 26, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.1`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{latvian-stt, author = {Tyers, Francis}, title = {Latvian STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-LV-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).
## Intended use
Speech-to-Text for the [Latvian Language](https://en.wikipedia.org/wiki/Latvian_language) on 16kHz, mono-channel audio.
## Performance Factors
Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
## Metrics
STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.
#### Transcription Accuracy
The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/lv/).
|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|82.8\%|28.3\%|
#### Real-Time Factor
Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`; for example, taking 5 seconds to transcribe 10 seconds of audio gives an RTF of 0.5. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.
Recorded average RTF on laptop CPU: ``
#### Model Size
`model.pbmm`: 181M
`model.tflite`: 46M
### Approaches to uncertainty and variability
Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.
## Training data
This model was trained on the Common Voice 6.1 training set.
## Evaluation data
The model was evaluated on the Common Voice 6.1 test set.
## Ethical considerations
Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.
### Demographic Bias
You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.
### Surveillance
Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.
## Caveats and recommendations
Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/optimizer/rule_based_graph_transformer.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/graph/graph_viewer.h"
#include "core/optimizer/graph_transformer.h"
#include "core/optimizer/rewrite_rule.h"
namespace onnxruntime {
/**
@class RuleBasedGraphTransformer
Rule-based graph transformer that provides an API to register rewrite rules
and an API to apply all applicable rules to a Graph.
Represents an IGraphTransformer determined by a set of rewrite rules.
The transformer will apply all the rewrite rules iteratively as determined by the underlying rewriting strategy.
Several rewriting strategies are possible when traversing the graph and applying rewrite rules,
each with different trade-offs. At the moment, we define one that performs a top-down traversal of nodes.
@TODO: Is a bottom-up traversal more efficient?
@TODO: Is it worth adding the max number of passes a rule should be applied for?
@TODO: We need to define a contract about whether a rewrite rule is allowed to leave
the graph in an inconsistent state (this will determine when and where we will be
calling Graph::resolve()).
*/
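// Example usage (an illustrative sketch only: MyFusionRule stands in for a
// hypothetical RewriteRule subclass, and the Apply call reflects the
// GraphTransformer interface this class inherits):
//
//   RuleBasedGraphTransformer transformer{"MyRuleTransformer"};
//   ORT_RETURN_IF_ERROR(transformer.Register(std::make_unique<MyFusionRule>()));
//   bool modified = false;
//   ORT_RETURN_IF_ERROR(transformer.Apply(graph, modified, logger));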
class RuleBasedGraphTransformer : public GraphTransformer {
public:
RuleBasedGraphTransformer(const std::string& name,
const std::unordered_set<std::string>& compatible_execution_providers = {})
: GraphTransformer(name, compatible_execution_providers) {}
/** Registers a rewrite rule in this transformer. */
Status Register(std::unique_ptr<RewriteRule> rule);
/** Gets the list of registered rewrite rules that will be triggered on nodes with the given op type
by this rule-based transformer.
@returns a pointer to the vector of rewrite rules registered for that op type, or nullptr if none. */
const std::vector<std::reference_wrapper<const RewriteRule>>* GetRewriteRulesForOpType(const std::string& op_type) const {
auto rules = op_type_to_rules_.find(op_type);
return (rules != op_type_to_rules_.cend()) ? &rules->second : nullptr;
}
/** Gets the rewrite rules that are evaluated on all nodes irrespective of their op type.
@returns a pointer to the vector containing all such rewrite rules (possibly empty). */
const std::vector<std::reference_wrapper<const RewriteRule>>* GetAnyOpRewriteRules() const {
return &any_op_type_rules_;
}
/** Returns the total number of rules that are registered in this transformer. */
size_t RulesCount() const;
protected:
/** Applies the given set of rewrite rules on the Node of this Graph.
@param[in] graph The Graph.
@param[in] node The Node to apply the rules to.
@param[in] rules The vector of RewriteRules that will be applied to the Node.
@param[out] rule_effect Enum that indicates whether and how the graph was modified as a result of
applying rules on this node.
@returns Status indicating success or providing error information. */
common::Status ApplyRulesOnNode(Graph& graph, Node& node,
const std::vector<std::reference_wrapper<const RewriteRule>>& rules,
RewriteRule::RewriteRuleEffect& rule_effect, const logging::Logger& logger) const;
private:
using RuleEffect = RewriteRule::RewriteRuleEffect;
// The list of unique pointers for all rules (so that rules can be registered for several op types).
std::vector<std::unique_ptr<RewriteRule>> rules_;
// Map that associates a node's op type with the vector of rules that are registered to be triggered for that node.
std::unordered_map<std::string, std::vector<std::reference_wrapper<const RewriteRule>>> op_type_to_rules_;
// Rules that will be evaluated regardless of the op type of the node.
std::vector<std::reference_wrapper<const RewriteRule>> any_op_type_rules_;
// Performs a single top-down traversal of the graph and applies all registered rules.
common::Status ApplyImpl(Graph& graph, bool& modified, int graph_level, const logging::Logger& logger) const override;
};
} // namespace onnxruntime
| 0 |
coqui_public_repos/STT/native_client/kenlm | coqui_public_repos/STT/native_client/kenlm/util/parallel_read.hh | #ifndef UTIL_PARALLEL_READ__
#define UTIL_PARALLEL_READ__
/* Read pieces of a file in parallel. This has a very specific use case:
 * reading files from Lustre is CPU-bound, so multiple threads actually
 * increase throughput. Speed matters when an LM takes a terabyte.
*/
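/* Example (sketch; util::scoped_fd and util::OpenReadOrThrow come from
 * util/file.hh, which callers must include themselves):
 *
 *   util::scoped_fd fd(util::OpenReadOrThrow("big.arpa"));
 *   std::vector<char> buf(1ULL << 30); // 1 GiB destination buffer
 *   util::ParallelRead(fd.get(), buf.data(), buf.size(), 0);
 */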
#include <cstddef>
#include <stdint.h>
namespace util {
void ParallelRead(int fd, void *to, std::size_t amount, uint64_t offset);
} // namespace util
#endif // UTIL_PARALLEL_READ__
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/CMakeLists.txt |
#-DHAVE_CONFIG_H -I./../include -fno-exceptions -funsigned-char -std=c++11 -MT symbol-table.lo -MD -MP -MF .deps/symbol-table.Tpo -c symbol-table.cc -fno-common -DPIC -o .libs/symbol-table.o
include_directories(./include/)
install(DIRECTORY include/ DESTINATION include/
FILES_MATCHING PATTERN "*.h")
add_subdirectory(lib)
add_subdirectory(script)
if(HAVE_BIN)
add_subdirectory(bin)
endif(HAVE_BIN)
add_subdirectory(extensions)
enable_testing()
add_subdirectory(test)
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/bin/run-ci-ldc93s1_singleshotinference.sh | #!/bin/sh
set -xe
ldc93s1_dir="./data/smoke_test"
ldc93s1_csv="${ldc93s1_dir}/ldc93s1.csv"
if [ ! -f "${ldc93s1_dir}/ldc93s1.csv" ]; then
echo "Downloading and preprocessing LDC93S1 example data, saving in ${ldc93s1_dir}."
python -u bin/import_ldc93s1.py ${ldc93s1_dir}
fi;
# Force only one visible device because we have a single-sample dataset
# and when trying to run on multiple devices (like GPUs), this will break
export CUDA_VISIBLE_DEVICES=0
python -m coqui_stt_training.train \
--alphabet_config_path "data/alphabet.txt" \
--show_progressbar false --early_stop false \
--train_files ${ldc93s1_csv} --train_batch_size 1 \
--dev_files ${ldc93s1_csv} --dev_batch_size 1 \
--test_files ${ldc93s1_csv} --test_batch_size 1 \
--n_hidden 100 --epochs 1 \
--max_to_keep 1 --checkpoint_dir '/tmp/ckpt' --checkpoint_secs 0 \
--learning_rate 0.001 --dropout_rate 0.05 \
--scorer_path 'data/smoke_test/pruned_lm.scorer'
python -m coqui_stt_training.training_graph_inference \
--n_hidden 100 \
--checkpoint_dir '/tmp/ckpt' \
--scorer_path 'data/smoke_test/pruned_lm.scorer' \
--one_shot_infer 'data/smoke_test/LDC93S1.wav'
python -m coqui_stt_training.training_graph_inference_flashlight \
--n_hidden 100 \
--checkpoint_dir '/tmp/ckpt' \
--scorer_path 'data/smoke_test/pruned_lm.scorer' \
--vocab_file 'data/smoke_test/vocab.pruned.txt' \
--one_shot_infer 'data/smoke_test/LDC93S1.wav'
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/Makefile.am | if HAVE_COMPRESS
compress_include_headers = fst/extensions/compress/compress.h \
fst/extensions/compress/compress-script.h fst/extensions/compress/gzfile.h \
fst/extensions/compress/elias.h fst/extensions/compress/randmod.h
endif
if HAVE_FAR
far_include_headers = fst/extensions/far/compile-strings.h \
fst/extensions/far/create.h fst/extensions/far/equal.h \
fst/extensions/far/extract.h fst/extensions/far/far.h \
fst/extensions/far/far-class.h fst/extensions/far/farlib.h \
fst/extensions/far/farscript.h fst/extensions/far/getters.h \
fst/extensions/far/info.h fst/extensions/far/isomorphic.h \
fst/extensions/far/print-strings.h fst/extensions/far/script-impl.h \
fst/extensions/far/stlist.h fst/extensions/far/sttable.h
endif
if HAVE_LINEAR
linear_include_headers = fst/extensions/linear/linear-fst-data-builder.h \
fst/extensions/linear/linear-fst-data.h fst/extensions/linear/linear-fst.h \
fst/extensions/linear/linearscript.h fst/extensions/linear/loglinear-apply.h \
fst/extensions/linear/trie.h
endif
if HAVE_MPDT
mpdt_include_headers = fst/extensions/mpdt/compose.h \
fst/extensions/mpdt/expand.h fst/extensions/mpdt/info.h \
fst/extensions/mpdt/mpdt.h fst/extensions/mpdt/mpdtlib.h \
fst/extensions/mpdt/mpdtscript.h fst/extensions/mpdt/read_write_utils.h \
fst/extensions/mpdt/reverse.h
endif
if HAVE_NGRAM
ngram_include_headers = fst/extensions/ngram/bitmap-index.h \
fst/extensions/ngram/ngram-fst.h fst/extensions/ngram/nthbit.h
endif
if HAVE_PDT
pdt_include_headers = fst/extensions/pdt/collection.h \
fst/extensions/pdt/compose.h fst/extensions/pdt/expand.h \
fst/extensions/pdt/getters.h fst/extensions/pdt/info.h \
fst/extensions/pdt/paren.h fst/extensions/pdt/pdt.h \
fst/extensions/pdt/pdtlib.h fst/extensions/pdt/pdtscript.h \
fst/extensions/pdt/replace.h fst/extensions/pdt/reverse.h \
fst/extensions/pdt/shortest-path.h
endif
if HAVE_SPECIAL
special_include_headers = fst/extensions/special/phi-fst.h \
fst/extensions/special/rho-fst.h fst/extensions/special/sigma-fst.h
endif
if HAVE_GRM
far_include_headers = fst/extensions/far/compile-strings.h \
fst/extensions/far/create.h fst/extensions/far/equal.h \
fst/extensions/far/extract.h fst/extensions/far/far.h \
fst/extensions/far/far-class.h fst/extensions/far/farlib.h \
fst/extensions/far/farscript.h fst/extensions/far/getters.h \
fst/extensions/far/info.h fst/extensions/far/isomorphic.h \
fst/extensions/far/print-strings.h fst/extensions/far/script-impl.h \
fst/extensions/far/stlist.h fst/extensions/far/sttable.h
mpdt_include_headers = fst/extensions/mpdt/compose.h \
fst/extensions/mpdt/expand.h fst/extensions/mpdt/info.h \
fst/extensions/mpdt/mpdt.h fst/extensions/mpdt/mpdtlib.h \
fst/extensions/mpdt/mpdtscript.h fst/extensions/mpdt/read_write_utils.h \
fst/extensions/mpdt/reverse.h
pdt_include_headers = fst/extensions/pdt/collection.h \
fst/extensions/pdt/compose.h fst/extensions/pdt/expand.h \
fst/extensions/pdt/getters.h fst/extensions/pdt/info.h \
fst/extensions/pdt/paren.h fst/extensions/pdt/pdt.h \
fst/extensions/pdt/pdtlib.h fst/extensions/pdt/pdtscript.h \
fst/extensions/pdt/replace.h fst/extensions/pdt/reverse.h \
fst/extensions/pdt/shortest-path.h
endif
script_include_headers = fst/script/arc-class.h \
fst/script/arciterator-class.h fst/script/arcsort.h \
fst/script/arg-packs.h fst/script/closure.h fst/script/compile-impl.h \
fst/script/compile.h fst/script/compose.h fst/script/concat.h \
fst/script/connect.h fst/script/convert.h fst/script/decode.h \
fst/script/determinize.h fst/script/difference.h fst/script/disambiguate.h \
fst/script/draw-impl.h fst/script/draw.h fst/script/encode.h \
fst/script/encodemapper-class.h fst/script/epsnormalize.h fst/script/equal.h \
fst/script/equivalent.h fst/script/fst-class.h fst/script/fstscript.h \
fst/script/getters.h fst/script/info-impl.h fst/script/info.h \
fst/script/intersect.h fst/script/invert.h fst/script/isomorphic.h \
fst/script/map.h fst/script/minimize.h fst/script/print-impl.h \
fst/script/print.h fst/script/project.h fst/script/prune.h \
fst/script/push.h fst/script/randequivalent.h fst/script/randgen.h \
fst/script/register.h fst/script/relabel.h fst/script/replace.h \
fst/script/reverse.h fst/script/reweight.h fst/script/rmepsilon.h \
fst/script/script-impl.h fst/script/shortest-distance.h \
fst/script/shortest-path.h fst/script/stateiterator-class.h \
fst/script/synchronize.h fst/script/text-io.h fst/script/topsort.h \
fst/script/union.h fst/script/weight-class.h fst/script/fstscript-decl.h \
fst/script/verify.h
test_include_headers = fst/test/algo_test.h fst/test/fst_test.h \
fst/test/rand-fst.h fst/test/weight-tester.h
nobase_include_HEADERS = fst/accumulator.h fst/add-on.h fst/arc-arena.h \
fst/arc-map.h fst/arc.h fst/arcfilter.h fst/arcsort.h fst/bi-table.h \
fst/cache.h fst/closure.h fst/compact-fst.h fst/compat.h fst/complement.h \
fst/compose-filter.h fst/compose.h fst/concat.h fst/config.h fst/connect.h \
fst/const-fst.h fst/determinize.h fst/dfs-visit.h fst/difference.h \
fst/disambiguate.h fst/edit-fst.h fst/encode.h fst/epsnormalize.h fst/equal.h \
fst/equivalent.h fst/expanded-fst.h fst/expectation-weight.h \
fst/factor-weight.h fst/filter-state.h fst/flags.h fst/float-weight.h \
fst/fst-decl.h fst/fst.h fst/fstlib.h fst/generic-register.h fst/heap.h \
fst/icu.h fst/intersect.h fst/interval-set.h fst/invert.h fst/isomorphic.h \
fst/label-reachable.h fst/lexicographic-weight.h fst/lock.h fst/log.h \
fst/lookahead-filter.h fst/lookahead-matcher.h fst/map.h fst/mapped-file.h \
fst/matcher-fst.h fst/matcher.h fst/memory.h fst/minimize.h fst/mutable-fst.h \
fst/pair-weight.h fst/partition.h fst/power-weight.h fst/product-weight.h \
fst/project.h fst/properties.h fst/prune.h fst/push.h fst/queue.h \
fst/randequivalent.h fst/randgen.h fst/rational.h fst/register.h \
fst/relabel.h fst/replace-util.h fst/replace.h fst/reverse.h fst/reweight.h \
fst/rmepsilon.h fst/rmfinalepsilon.h fst/set-weight.h fst/shortest-distance.h \
fst/shortest-path.h fst/signed-log-weight.h fst/sparse-power-weight.h \
fst/sparse-tuple-weight.h fst/state-map.h fst/state-reachable.h \
fst/state-table.h fst/statesort.h fst/string-weight.h fst/string.h \
fst/symbol-table-ops.h fst/symbol-table.h fst/synchronize.h \
fst/test-properties.h fst/topsort.h fst/tuple-weight.h fst/types.h \
fst/union-find.h fst/union-weight.h fst/union.h fst/util.h fst/vector-fst.h \
fst/verify.h fst/visit.h fst/weight.h \
$(compress_include_headers) \
$(far_include_headers) \
$(linear_include_headers) \
$(mpdt_include_headers) \
$(ngram_include_headers) \
$(pdt_include_headers) \
$(script_include_headers) \
$(special_include_headers) \
$(test_include_headers)
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/rmepsilon.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes that implement epsilon-removal.
#ifndef FST_RMEPSILON_H_
#define FST_RMEPSILON_H_
#include <forward_list>
#include <stack>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/arcfilter.h>
#include <fst/cache.h>
#include <fst/connect.h>
#include <fst/factor-weight.h>
#include <fst/invert.h>
#include <fst/prune.h>
#include <fst/queue.h>
#include <fst/shortest-distance.h>
#include <fst/topsort.h>
namespace fst {
template <class Arc, class Queue>
class RmEpsilonOptions
: public ShortestDistanceOptions<Arc, Queue, EpsilonArcFilter<Arc>> {
public:
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
bool connect; // Connect output
Weight weight_threshold; // Pruning weight threshold.
StateId state_threshold; // Pruning state threshold.
explicit RmEpsilonOptions(Queue *queue, float delta = kShortestDelta,
bool connect = true,
Weight weight_threshold = Weight::Zero(),
StateId state_threshold = kNoStateId)
: ShortestDistanceOptions<Arc, Queue, EpsilonArcFilter<Arc>>(
queue, EpsilonArcFilter<Arc>(), kNoStateId, delta),
connect(connect),
weight_threshold(std::move(weight_threshold)),
state_threshold(state_threshold) {}
};
namespace internal {
// Computation state of the epsilon-removal algorithm.
template <class Arc, class Queue>
class RmEpsilonState {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
RmEpsilonState(const Fst<Arc> &fst, std::vector<Weight> *distance,
const RmEpsilonOptions<Arc, Queue> &opts)
: fst_(fst),
distance_(distance),
sd_state_(fst_, distance, opts, true),
expand_id_(0) {}
void Expand(StateId s);
std::vector<Arc> &Arcs() { return arcs_; }
const Weight &Final() const { return final_; }
bool Error() const { return sd_state_.Error(); }
private:
struct Element {
Label ilabel;
Label olabel;
StateId nextstate;
Element() {}
    Element(Label ilabel, Label olabel, StateId nextstate)
        : ilabel(ilabel), olabel(olabel), nextstate(nextstate) {}
};
struct ElementHash {
public:
size_t operator()(const Element &element) const {
static constexpr size_t prime0 = 7853;
static constexpr size_t prime1 = 7867;
return static_cast<size_t>(element.nextstate) +
static_cast<size_t>(element.ilabel) * prime0 +
static_cast<size_t>(element.olabel) * prime1;
}
};
class ElementEqual {
public:
bool operator()(const Element &e1, const Element &e2) const {
return (e1.ilabel == e2.ilabel) && (e1.olabel == e2.olabel) &&
(e1.nextstate == e2.nextstate);
}
};
using ElementMap = std::unordered_map<Element, std::pair<StateId, size_t>,
ElementHash, ElementEqual>;
const Fst<Arc> &fst_;
// Distance from state being expanded in epsilon-closure.
std::vector<Weight> *distance_;
// Shortest distance algorithm computation state.
internal::ShortestDistanceState<Arc, Queue, EpsilonArcFilter<Arc>> sd_state_;
// Maps an element to a pair corresponding to a position in the arcs vector
  // of the state being expanded. The element corresponds to the position in
// the arcs_ vector if p.first is equal to the state being expanded.
ElementMap element_map_;
EpsilonArcFilter<Arc> eps_filter_;
std::stack<StateId> eps_queue_; // Queue used to visit the epsilon-closure.
std::vector<bool> visited_; // True if the state has been visited.
std::forward_list<StateId> visited_states_; // List of visited states.
std::vector<Arc> arcs_; // Arcs of state being expanded.
Weight final_; // Final weight of state being expanded.
StateId expand_id_; // Unique ID for each call to Expand
RmEpsilonState(const RmEpsilonState &) = delete;
RmEpsilonState &operator=(const RmEpsilonState &) = delete;
};
template <class Arc, class Queue>
void RmEpsilonState<Arc, Queue>::Expand(typename Arc::StateId source) {
final_ = Weight::Zero();
arcs_.clear();
sd_state_.ShortestDistance(source);
if (sd_state_.Error()) return;
eps_queue_.push(source);
while (!eps_queue_.empty()) {
const auto state = eps_queue_.top();
eps_queue_.pop();
while (visited_.size() <= state) visited_.push_back(false);
if (visited_[state]) continue;
visited_[state] = true;
visited_states_.push_front(state);
for (ArcIterator<Fst<Arc>> aiter(fst_, state); !aiter.Done();
aiter.Next()) {
auto arc = aiter.Value();
arc.weight = Times((*distance_)[state], arc.weight);
if (eps_filter_(arc)) {
while (visited_.size() <= arc.nextstate) visited_.push_back(false);
if (!visited_[arc.nextstate]) eps_queue_.push(arc.nextstate);
} else {
const Element element(arc.ilabel, arc.olabel, arc.nextstate);
auto insert_result = element_map_.insert(
std::make_pair(element, std::make_pair(expand_id_, arcs_.size())));
if (insert_result.second) {
arcs_.push_back(arc);
} else {
if (insert_result.first->second.first == expand_id_) {
auto &weight = arcs_[insert_result.first->second.second].weight;
weight = Plus(weight, arc.weight);
} else {
insert_result.first->second.first = expand_id_;
insert_result.first->second.second = arcs_.size();
arcs_.push_back(arc);
}
}
}
}
final_ = Plus(final_, Times((*distance_)[state], fst_.Final(state)));
}
while (!visited_states_.empty()) {
visited_[visited_states_.front()] = false;
visited_states_.pop_front();
}
++expand_id_;
}
} // namespace internal
// Removes epsilon-transitions (when both the input and output label are an
// epsilon) from a transducer. The result will be an equivalent FST that has no
// such epsilon transitions. This version modifies its input. It allows fine
// control via the options argument; see below for a simpler interface.
//
// The distance vector will be used to hold the shortest distances during the
// epsilon-closure computation. The state queue discipline and convergence delta
// are taken in the options argument.
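//
// Example (sketch): epsilon-removal with an explicitly chosen queue
// discipline; fst is assumed to be a StdVectorFst built elsewhere:
//
//   std::vector<StdArc::Weight> distance;
//   FifoQueue<StdArc::StateId> queue;
//   RmEpsilonOptions<StdArc, FifoQueue<StdArc::StateId>> opts(&queue);
//   RmEpsilon(&fst, &distance, opts);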
template <class Arc, class Queue>
void RmEpsilon(MutableFst<Arc> *fst,
std::vector<typename Arc::Weight> *distance,
const RmEpsilonOptions<Arc, Queue> &opts) {
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
if (fst->Start() == kNoStateId) return;
// noneps_in[s] will be set to true iff s admits a non-epsilon incoming
// transition or is the start state.
std::vector<bool> noneps_in(fst->NumStates(), false);
noneps_in[fst->Start()] = true;
for (size_t i = 0; i < fst->NumStates(); ++i) {
for (ArcIterator<Fst<Arc>> aiter(*fst, i); !aiter.Done(); aiter.Next()) {
const auto &arc = aiter.Value();
if (arc.ilabel != 0 || arc.olabel != 0) {
noneps_in[arc.nextstate] = true;
}
}
}
  // States sorted in topological order (if acyclic) or in a generic
  // topological order otherwise (cyclic case).
std::vector<StateId> states;
states.reserve(fst->NumStates());
if (fst->Properties(kTopSorted, false) & kTopSorted) {
for (size_t i = 0; i < fst->NumStates(); i++) states.push_back(i);
} else if (fst->Properties(kAcyclic, false) & kAcyclic) {
std::vector<StateId> order;
bool acyclic;
TopOrderVisitor<Arc> top_order_visitor(&order, &acyclic);
DfsVisit(*fst, &top_order_visitor, EpsilonArcFilter<Arc>());
// Sanity check: should be acyclic if property bit is set.
if (!acyclic) {
FSTERROR() << "RmEpsilon: Inconsistent acyclic property bit";
fst->SetProperties(kError, kError);
return;
}
states.resize(order.size());
for (StateId i = 0; i < order.size(); i++) states[order[i]] = i;
} else {
uint64_t props;
std::vector<StateId> scc;
SccVisitor<Arc> scc_visitor(&scc, nullptr, nullptr, &props);
DfsVisit(*fst, &scc_visitor, EpsilonArcFilter<Arc>());
std::vector<StateId> first(scc.size(), kNoStateId);
std::vector<StateId> next(scc.size(), kNoStateId);
for (StateId i = 0; i < scc.size(); i++) {
if (first[scc[i]] != kNoStateId) next[i] = first[scc[i]];
first[scc[i]] = i;
}
for (StateId i = 0; i < first.size(); i++) {
for (auto j = first[i]; j != kNoStateId; j = next[j]) {
states.push_back(j);
}
}
}
internal::RmEpsilonState<Arc, Queue> rmeps_state(*fst, distance, opts);
while (!states.empty()) {
const auto state = states.back();
states.pop_back();
if (!noneps_in[state] &&
(opts.connect || opts.weight_threshold != Weight::Zero() ||
opts.state_threshold != kNoStateId)) {
continue;
}
rmeps_state.Expand(state);
fst->SetFinal(state, rmeps_state.Final());
fst->DeleteArcs(state);
auto &arcs = rmeps_state.Arcs();
fst->ReserveArcs(state, arcs.size());
while (!arcs.empty()) {
fst->AddArc(state, arcs.back());
arcs.pop_back();
}
}
if (opts.connect || opts.weight_threshold != Weight::Zero() ||
opts.state_threshold != kNoStateId) {
for (size_t s = 0; s < fst->NumStates(); ++s) {
if (!noneps_in[s]) fst->DeleteArcs(s);
}
}
if (rmeps_state.Error()) fst->SetProperties(kError, kError);
fst->SetProperties(
RmEpsilonProperties(fst->Properties(kFstProperties, false)),
kFstProperties);
if (opts.weight_threshold != Weight::Zero() ||
opts.state_threshold != kNoStateId) {
Prune(fst, opts.weight_threshold, opts.state_threshold);
}
if (opts.connect && opts.weight_threshold == Weight::Zero() &&
opts.state_threshold == kNoStateId) {
Connect(fst);
}
}
// Removes epsilon-transitions (when both the input and output label
// are an epsilon) from a transducer. The result will be an equivalent
// FST that has no such epsilon transitions. This version modifies its
// input. It has a simplified interface; see above for a version that
// allows finer control.
//
// Complexity:
//
// - Time:
//
// Unweighted: O(v^2 + ve).
//   Acyclic: O(v^2 + ve).
//   Tropical semiring: O(v^2 log v + ve).
// General: exponential.
//
// - Space: O(ve)
//
// where v is the number of states visited and e is the number of arcs visited.
//
// For more information, see:
//
// Mohri, M. 2002. Generic epsilon-removal and input epsilon-normalization
// algorithms for weighted transducers. International Journal of Foundations
// of Computer Science 13(1): 129-143.
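//
// Example (sketch): in-place epsilon-removal with the default options;
// fst is assumed to be a StdVectorFst containing some (0, 0)-labeled arcs:
//
//   RmEpsilon(&fst);  // result is equivalent, epsilon-free, and connected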
template <class Arc>
void RmEpsilon(MutableFst<Arc> *fst, bool connect = true,
typename Arc::Weight weight_threshold = Arc::Weight::Zero(),
typename Arc::StateId state_threshold = kNoStateId,
float delta = kShortestDelta) {
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
std::vector<Weight> distance;
AutoQueue<StateId> state_queue(*fst, &distance, EpsilonArcFilter<Arc>());
RmEpsilonOptions<Arc, AutoQueue<StateId>> opts(
&state_queue, delta, connect, weight_threshold, state_threshold);
RmEpsilon(fst, &distance, opts);
}
struct RmEpsilonFstOptions : CacheOptions {
float delta;
explicit RmEpsilonFstOptions(const CacheOptions &opts,
float delta = kShortestDelta)
: CacheOptions(opts), delta(delta) {}
explicit RmEpsilonFstOptions(float delta = kShortestDelta) : delta(delta) {}
};
namespace internal {
// Implementation of delayed RmEpsilonFst.
template <class Arc>
class RmEpsilonFstImpl : public CacheImpl<Arc> {
public:
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using Store = DefaultCacheStore<Arc>;
using State = typename Store::State;
using FstImpl<Arc>::Properties;
using FstImpl<Arc>::SetType;
using FstImpl<Arc>::SetProperties;
using FstImpl<Arc>::SetInputSymbols;
using FstImpl<Arc>::SetOutputSymbols;
using CacheBaseImpl<CacheState<Arc>>::HasArcs;
using CacheBaseImpl<CacheState<Arc>>::HasFinal;
using CacheBaseImpl<CacheState<Arc>>::HasStart;
using CacheBaseImpl<CacheState<Arc>>::PushArc;
using CacheBaseImpl<CacheState<Arc>>::SetArcs;
using CacheBaseImpl<CacheState<Arc>>::SetFinal;
using CacheBaseImpl<CacheState<Arc>>::SetStart;
RmEpsilonFstImpl(const Fst<Arc> &fst, const RmEpsilonFstOptions &opts)
: CacheImpl<Arc>(opts),
fst_(fst.Copy()),
delta_(opts.delta),
rmeps_state_(
*fst_, &distance_,
RmEpsilonOptions<Arc, FifoQueue<StateId>>(&queue_, delta_, false)) {
SetType("rmepsilon");
SetProperties(
RmEpsilonProperties(fst.Properties(kFstProperties, false), true),
kCopyProperties);
SetInputSymbols(fst.InputSymbols());
SetOutputSymbols(fst.OutputSymbols());
}
RmEpsilonFstImpl(const RmEpsilonFstImpl &impl)
: CacheImpl<Arc>(impl),
fst_(impl.fst_->Copy(true)),
delta_(impl.delta_),
rmeps_state_(
*fst_, &distance_,
RmEpsilonOptions<Arc, FifoQueue<StateId>>(&queue_, delta_, false)) {
SetType("rmepsilon");
SetProperties(impl.Properties(), kCopyProperties);
SetInputSymbols(impl.InputSymbols());
SetOutputSymbols(impl.OutputSymbols());
}
StateId Start() {
if (!HasStart()) SetStart(fst_->Start());
return CacheImpl<Arc>::Start();
}
Weight Final(StateId s) {
if (!HasFinal(s)) Expand(s);
return CacheImpl<Arc>::Final(s);
}
size_t NumArcs(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl<Arc>::NumArcs(s);
}
size_t NumInputEpsilons(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl<Arc>::NumInputEpsilons(s);
}
size_t NumOutputEpsilons(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl<Arc>::NumOutputEpsilons(s);
}
uint64_t Properties() const override { return Properties(kFstProperties); }
// Sets error if found and returns other FST impl properties.
uint64_t Properties(uint64_t mask) const override {
if ((mask & kError) &&
(fst_->Properties(kError, false) || rmeps_state_.Error())) {
SetProperties(kError, kError);
}
return FstImpl<Arc>::Properties(mask);
}
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) {
if (!HasArcs(s)) Expand(s);
CacheImpl<Arc>::InitArcIterator(s, data);
}
void Expand(StateId s) {
rmeps_state_.Expand(s);
SetFinal(s, rmeps_state_.Final());
auto &arcs = rmeps_state_.Arcs();
while (!arcs.empty()) {
PushArc(s, arcs.back());
arcs.pop_back();
}
SetArcs(s);
}
private:
std::unique_ptr<const Fst<Arc>> fst_;
float delta_;
std::vector<Weight> distance_;
FifoQueue<StateId> queue_;
internal::RmEpsilonState<Arc, FifoQueue<StateId>> rmeps_state_;
};
} // namespace internal
// Removes epsilon-transitions (when both the input and output label are an
// epsilon) from a transducer. The result will be an equivalent FST that has no
// such epsilon transitions. This version is a delayed FST.
//
// Complexity:
//
// - Time:
// Unweighted: O(v^2 + ve).
// General: exponential.
//
// - Space: O(vE)
//
// where v is the number of states visited and e is the number of arcs visited.
// Constant time to visit an input state or arc is assumed and exclusive of
// caching.
//
// For more information, see:
//
// Mohri, M. 2002. Generic epsilon-removal and input epsilon-normalization
// algorithms for weighted transducers. International Journal of Foundations
// of Computer Science 13(1): 129-143.
//
// This class attaches interface to implementation and handles
// reference counting, delegating most methods to ImplToFst.
template <class A>
class RmEpsilonFst : public ImplToFst<internal::RmEpsilonFstImpl<A>> {
public:
using Arc = A;
using StateId = typename Arc::StateId;
using Store = DefaultCacheStore<Arc>;
using State = typename Store::State;
using Impl = internal::RmEpsilonFstImpl<Arc>;
friend class ArcIterator<RmEpsilonFst<Arc>>;
friend class StateIterator<RmEpsilonFst<Arc>>;
explicit RmEpsilonFst(const Fst<Arc> &fst)
: ImplToFst<Impl>(std::make_shared<Impl>(fst, RmEpsilonFstOptions())) {}
RmEpsilonFst(const Fst<A> &fst, const RmEpsilonFstOptions &opts)
: ImplToFst<Impl>(std::make_shared<Impl>(fst, opts)) {}
// See Fst<>::Copy() for doc.
RmEpsilonFst(const RmEpsilonFst<Arc> &fst, bool safe = false)
: ImplToFst<Impl>(fst, safe) {}
// Get a copy of this RmEpsilonFst. See Fst<>::Copy() for further doc.
RmEpsilonFst<Arc> *Copy(bool safe = false) const override {
return new RmEpsilonFst<Arc>(*this, safe);
}
inline void InitStateIterator(StateIteratorData<Arc> *data) const override;
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
GetMutableImpl()->InitArcIterator(s, data);
}
private:
using ImplToFst<Impl>::GetImpl;
using ImplToFst<Impl>::GetMutableImpl;
RmEpsilonFst &operator=(const RmEpsilonFst &) = delete;
};
// Specialization for RmEpsilonFst.
template <class Arc>
class StateIterator<RmEpsilonFst<Arc>>
: public CacheStateIterator<RmEpsilonFst<Arc>> {
public:
explicit StateIterator(const RmEpsilonFst<Arc> &fst)
: CacheStateIterator<RmEpsilonFst<Arc>>(fst, fst.GetMutableImpl()) {}
};
// Specialization for RmEpsilonFst.
template <class Arc>
class ArcIterator<RmEpsilonFst<Arc>>
: public CacheArcIterator<RmEpsilonFst<Arc>> {
public:
using StateId = typename Arc::StateId;
ArcIterator(const RmEpsilonFst<Arc> &fst, StateId s)
: CacheArcIterator<RmEpsilonFst<Arc>>(fst.GetMutableImpl(), s) {
if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s);
}
};
template <class Arc>
inline void RmEpsilonFst<Arc>::InitStateIterator(
StateIteratorData<Arc> *data) const {
data->base = new StateIterator<RmEpsilonFst<Arc>>(*this);
}
// Useful alias when using StdArc.
using StdRmEpsilonFst = RmEpsilonFst<StdArc>;
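// A delayed-usage sketch (illustrative only; "in.fst" is a hypothetical input
// file). Epsilon removal happens lazily, one state at a time, as the result
// is visited:
//
//   std::unique_ptr<StdFst> input(StdFst::Read("in.fst"));
//   StdRmEpsilonFst noeps(*input);
//   for (StateIterator<StdRmEpsilonFst> siter(noeps); !siter.Done();
//        siter.Next()) {
//     // ... visit siter.Value() ...
//   }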
} // namespace fst
#endif // FST_RMEPSILON_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/compact/compact8_unweighted-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/fst.h>
#include <fst/compact-fst.h>
namespace fst {
static FstRegisterer<CompactUnweightedFst<StdArc, uint8>>
CompactUnweightedFst_StdArc_uint8_registerer;
static FstRegisterer<CompactUnweightedFst<LogArc, uint8>>
CompactUnweightedFst_LogArc_uint8_registerer;
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/CMakeLists.txt | function (add_executable2 _name)
add_executable(${ARGV})
if (TARGET ${_name})
target_link_libraries(${_name} fstscript fst ${CMAKE_DL_LIBS})
set_target_properties(${_name} PROPERTIES FOLDER bin)
endif()
install(TARGETS ${_name} RUNTIME DESTINATION bin)
endfunction()
include_directories(../include ../script/)
add_executable2(fstarcsort fstarcsort-main.cc fstarcsort.cc)
add_executable2(fstclosure fstclosure-main.cc fstclosure.cc)
add_executable2(fstcompile fstcompile-main.cc fstcompile.cc)
add_executable2(fstcompose fstcompose-main.cc fstcompose.cc)
add_executable2(fstconcat fstconcat-main.cc fstconcat.cc)
add_executable2(fstconnect fstconnect-main.cc fstconnect.cc)
add_executable2(fstconvert fstconvert-main.cc fstconvert.cc)
add_executable2(fstdeterminize fstdeterminize-main.cc fstdeterminize.cc)
add_executable2(fstdifference fstdifference-main.cc fstdifference.cc)
add_executable2(fstdisambiguate fstdisambiguate-main.cc fstdisambiguate.cc)
add_executable2(fstdraw fstdraw-main.cc fstdraw.cc)
add_executable2(fstencode fstencode-main.cc fstencode.cc)
add_executable2(fstepsnormalize fstepsnormalize-main.cc fstepsnormalize.cc)
add_executable2(fstequal fstequal-main.cc fstequal.cc)
add_executable2(fstequivalent fstequivalent-main.cc fstequivalent.cc)
add_executable2(fstinfo fstinfo-main.cc fstinfo.cc)
add_executable2(fstintersect fstintersect-main.cc fstintersect.cc)
add_executable2(fstinvert fstinvert-main.cc fstinvert.cc)
add_executable2(fstisomorphic fstisomorphic-main.cc fstisomorphic.cc)
add_executable2(fstmap fstmap-main.cc fstmap.cc)
add_executable2(fstminimize fstminimize-main.cc fstminimize.cc)
add_executable2(fstprint fstprint-main.cc fstprint.cc)
add_executable2(fstproject fstproject-main.cc fstproject.cc)
add_executable2(fstprune fstprune-main.cc fstprune.cc)
add_executable2(fstpush fstpush-main.cc fstpush.cc)
add_executable2(fstrandgen fstrandgen-main.cc fstrandgen.cc)
add_executable2(fstrelabel fstrelabel-main.cc fstrelabel.cc)
add_executable2(fstreplace fstreplace-main.cc fstreplace.cc)
add_executable2(fstreverse fstreverse-main.cc fstreverse.cc)
add_executable2(fstreweight fstreweight-main.cc fstreweight.cc)
add_executable2(fstrmepsilon fstrmepsilon-main.cc fstrmepsilon.cc)
add_executable2(fstshortestdistance fstshortestdistance-main.cc fstshortestdistance.cc)
add_executable2(fstshortestpath fstshortestpath-main.cc fstshortestpath.cc)
add_executable2(fstsymbols fstsymbols-main.cc fstsymbols.cc)
add_executable2(fstsynchronize fstsynchronize-main.cc fstsynchronize.cc)
add_executable2(fsttopsort fsttopsort-main.cc fsttopsort.cc)
add_executable2(fstunion fstunion-main.cc fstunion.cc)
| 0 |
coqui_public_repos/STT-models/chatino/bozden | coqui_public_repos/STT-models/chatino/bozden/v1.0.0/alphabet.txt | # Each line in this file represents the Unicode codepoint (UTF-8 encoded)
# associated with a numeric index.
# A line that starts with # is a comment. You can escape it with \# if you wish
# to use '#' in the Alphabet.
0
1
2
3
4
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
ñ
õ
ʼ
# The last (non-comment) line needs to end with a newline.
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/isomorphic.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/isomorphic.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
bool Isomorphic(const FstClass &fst1, const FstClass &fst2, float delta) {
if (!internal::ArcTypesMatch(fst1, fst2, "Isomorphic")) return false;
IsomorphicInnerArgs iargs(fst1, fst2, delta);
IsomorphicArgs args(iargs);
Apply<Operation<IsomorphicArgs>>("Isomorphic", fst1.ArcType(), &args);
return args.retval;
}
REGISTER_FST_OPERATION(Isomorphic, StdArc, IsomorphicArgs);
REGISTER_FST_OPERATION(Isomorphic, LogArc, IsomorphicArgs);
REGISTER_FST_OPERATION(Isomorphic, Log64Arc, IsomorphicArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT-examples/net_framework | coqui_public_repos/STT-examples/net_framework/STTWPF/packages.config | <?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="AsyncAwaitBestPractices" version="3.1.0" targetFramework="net462" />
<package id="AsyncAwaitBestPractices.MVVM" version="3.1.0" targetFramework="net462" />
<package id="CommonServiceLocator" version="2.0.2" targetFramework="net462" />
<package id="CSCore" version="1.2.1.2" targetFramework="net462" />
<package id="STT" version="0.9.0" targetFramework="net462" />
<package id="MvvmLightLibs" version="5.4.1.1" targetFramework="net462" />
<package id="NAudio" version="1.9.0" targetFramework="net462" />
</packages> | 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-python_37-win-amd64-opt.yml | build:
template_file: test-win-opt-base.tyml
dependencies:
- "win-amd64-cpu-opt"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
system_setup:
>
${system.sox_win}
args:
tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.7.6:m 16k"
metadata:
name: "DeepSpeech Windows AMD64 CPU Python v3.7 tests"
description: "Testing DeepSpeech for Windows/AMD64 on Python v3.7, CPU only, optimized version"
| 0 |
coqui_public_repos/TTS/TTS/encoder | coqui_public_repos/TTS/TTS/encoder/configs/speaker_encoder_config.py | from dataclasses import asdict, dataclass
from TTS.encoder.configs.base_encoder_config import BaseEncoderConfig
@dataclass
class SpeakerEncoderConfig(BaseEncoderConfig):
"""Defines parameters for Speaker Encoder model."""
model: str = "speaker_encoder"
class_name_key: str = "speaker_name"
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-nodejs_10x_8k-linux-amd64-prod_pbmodel-opt.yml | build:
template_file: test-linux-opt-base.tyml
docker_image: "ubuntu:16.04"
dependencies:
- "linux-amd64-cpu-opt"
system_setup:
>
${nodejs.packages_xenial.prep_10} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests-prod.sh 10.x 8k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU NodeJS 10.x prod tests (8kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v10.x on prod model, CPU only, optimized version (8kHz)"
| 0 |
coqui_public_repos/TTS/TTS/tts | coqui_public_repos/TTS/TTS/tts/utils/helpers.py | import numpy as np
import torch
from scipy.stats import betabinom
from torch.nn import functional as F
try:
from TTS.tts.utils.monotonic_align.core import maximum_path_c
CYTHON = True
except ModuleNotFoundError:
CYTHON = False
class StandardScaler:
"""StandardScaler for mean-scale normalization with the given mean and scale values."""
def __init__(self, mean: np.ndarray = None, scale: np.ndarray = None) -> None:
self.mean_ = mean
self.scale_ = scale
def set_stats(self, mean, scale):
self.mean_ = mean
self.scale_ = scale
def reset_stats(self):
delattr(self, "mean_")
delattr(self, "scale_")
def transform(self, X):
X = np.asarray(X)
X -= self.mean_
X /= self.scale_
return X
def inverse_transform(self, X):
X = np.asarray(X)
X *= self.scale_
X += self.mean_
return X
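# Hedged usage sketch (illustrative comment only): normalize with precomputed
# statistics, then invert the transform.
#
#   scaler = StandardScaler(mean=np.array([0.5]), scale=np.array([2.0]))
#   x = scaler.transform([1.5, 2.5])       # -> array([0.5, 1.0])
#   scaler.inverse_transform(x)            # -> array([1.5, 2.5])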
# from https://gist.github.com/jihunchoi/f1434a77df9db1bb337417854b398df1
def sequence_mask(sequence_length, max_len=None):
"""Create a sequence mask for filtering padding in a sequence tensor.
Args:
sequence_length (torch.tensor): Sequence lengths.
max_len (int, Optional): Maximum sequence length. Defaults to None.
Shapes:
- mask: :math:`[B, T_max]`
"""
if max_len is None:
max_len = sequence_length.max()
seq_range = torch.arange(max_len, dtype=sequence_length.dtype, device=sequence_length.device)
# B x T_max
return seq_range.unsqueeze(0) < sequence_length.unsqueeze(1)
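# Hedged example (illustrative comment only):
#
#   >>> sequence_mask(torch.tensor([2, 4]))
#   tensor([[ True,  True, False, False],
#           [ True,  True,  True,  True]])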
def segment(x: torch.tensor, segment_indices: torch.tensor, segment_size=4, pad_short=False):
"""Segment each sample in a batch based on the provided segment indices
Args:
x (torch.tensor): Input tensor.
segment_indices (torch.tensor): Segment indices.
segment_size (int): Expected output segment size.
pad_short (bool): Pad the end of input tensor with zeros if shorter than the segment size.
"""
# pad the input tensor if it is shorter than the segment size
if pad_short and x.shape[-1] < segment_size:
x = torch.nn.functional.pad(x, (0, segment_size - x.size(2)))
segments = torch.zeros_like(x[:, :, :segment_size])
for i in range(x.size(0)):
index_start = segment_indices[i]
index_end = index_start + segment_size
x_i = x[i]
if pad_short and index_end >= x.size(2):
# pad the sample if it is shorter than the segment size
x_i = torch.nn.functional.pad(x_i, (0, (index_end + 1) - x.size(2)))
segments[i] = x_i[:, index_start:index_end]
return segments
def rand_segments(
x: torch.tensor, x_lengths: torch.tensor = None, segment_size=4, let_short_samples=False, pad_short=False
):
"""Create random segments based on the input lengths.
Args:
x (torch.tensor): Input tensor.
x_lengths (torch.tensor): Input lengths.
segment_size (int): Expected output segment size.
let_short_samples (bool): Allow shorter samples than the segment size.
pad_short (bool): Pad the end of input tensor with zeros if shorter than the segment size.
Shapes:
- x: :math:`[B, C, T]`
- x_lengths: :math:`[B]`
"""
    _x_lengths = x_lengths.clone() if x_lengths is not None else None
    B, _, T = x.size()
    if pad_short:
        if T < segment_size:
            x = torch.nn.functional.pad(x, (0, segment_size - T))
            T = segment_size
    if _x_lengths is None:
        # no lengths given: assume every sample spans the full time axis
        _x_lengths = torch.full((B,), T, dtype=torch.long, device=x.device)
    len_diff = _x_lengths - segment_size
    if let_short_samples:
        _x_lengths[len_diff < 0] = segment_size
        len_diff = _x_lengths - segment_size
    else:
        assert all(
            len_diff > 0
        ), f" [!] At least one sample is shorter than the segment size ({segment_size}). \n {_x_lengths}"
segment_indices = (torch.rand([B]).type_as(x) * (len_diff + 1)).long()
ret = segment(x, segment_indices, segment_size, pad_short=pad_short)
return ret, segment_indices
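# Hedged usage sketch (illustrative comment only): draw one random 4-frame
# segment per sample from a batch of [B, C, T] features.
#
#   x = torch.randn(2, 80, 20)
#   x_lengths = torch.tensor([20, 12])
#   segments, indices = rand_segments(x, x_lengths, segment_size=4)
#   # segments.shape -> torch.Size([2, 80, 4])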
def average_over_durations(values, durs):
"""Average values over durations.
Shapes:
- values: :math:`[B, 1, T_de]`
- durs: :math:`[B, T_en]`
- avg: :math:`[B, 1, T_en]`
"""
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = torch.nn.functional.pad(durs_cums_ends[:, :-1], (1, 0))
values_nonzero_cums = torch.nn.functional.pad(torch.cumsum(values != 0.0, dim=2), (1, 0))
values_cums = torch.nn.functional.pad(torch.cumsum(values, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = values.size(1)
dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l)
dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l)
values_sums = (torch.gather(values_cums, 2, dce) - torch.gather(values_cums, 2, dcs)).float()
values_nelems = (torch.gather(values_nonzero_cums, 2, dce) - torch.gather(values_nonzero_cums, 2, dcs)).float()
avg = torch.where(values_nelems == 0.0, values_nelems, values_sums / values_nelems)
return avg
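# Hedged example (illustrative comment only): average a 6-frame track over
# token durations [2, 4], yielding one value per input token.
#
#   values = torch.tensor([[[1.0, 3.0, 2.0, 2.0, 2.0, 2.0]]])  # [1, 1, 6]
#   durs = torch.tensor([[2, 4]])                               # [1, 2]
#   average_over_durations(values, durs)  # -> tensor([[[2., 2.]]])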
def convert_pad_shape(pad_shape):
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
def generate_path(duration, mask):
"""
Shapes:
- duration: :math:`[B, T_en]`
    - mask: :math:`[B, T_en, T_de]`
- path: :math:`[B, T_en, T_de]`
"""
b, t_x, t_y = mask.shape
cum_duration = torch.cumsum(duration, 1)
cum_duration_flat = cum_duration.view(b * t_x)
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
path = path.view(b, t_x, t_y)
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
path = path * mask
return path
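# Hedged example (illustrative comment only): durations [1, 2] over 3 decoder
# frames expand into a hard monotonic alignment path.
#
#   duration = torch.tensor([[1, 2]])  # [B, T_en]
#   mask = torch.ones(1, 2, 3)         # [B, T_en, T_de]
#   generate_path(duration, mask)
#   # -> tensor([[[1., 0., 0.],
#   #             [0., 1., 1.]]])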
def maximum_path(value, mask):
if CYTHON:
return maximum_path_cython(value, mask)
return maximum_path_numpy(value, mask)
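# Hedged usage sketch (illustrative comment only): best monotonic alignment
# between 2 tokens and 3 frames; the result is a hard 0/1 path.
#
#   value = torch.randn(1, 2, 3)  # alignment log-likelihoods [B, T_en, T_de]
#   mask = torch.ones(1, 2, 3)
#   path = maximum_path(value, mask)  # shape [1, 2, 3]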
def maximum_path_cython(value, mask):
"""Cython optimised version.
Shapes:
- value: :math:`[B, T_en, T_de]`
- mask: :math:`[B, T_en, T_de]`
"""
value = value * mask
device = value.device
dtype = value.dtype
value = value.data.cpu().numpy().astype(np.float32)
path = np.zeros_like(value).astype(np.int32)
mask = mask.data.cpu().numpy()
t_x_max = mask.sum(1)[:, 0].astype(np.int32)
t_y_max = mask.sum(2)[:, 0].astype(np.int32)
maximum_path_c(path, value, t_x_max, t_y_max)
return torch.from_numpy(path).to(device=device, dtype=dtype)
def maximum_path_numpy(value, mask, max_neg_val=None):
"""
Monotonic alignment search algorithm
Numpy-friendly version. It's about 4 times faster than torch version.
value: [b, t_x, t_y]
mask: [b, t_x, t_y]
"""
if max_neg_val is None:
max_neg_val = -np.inf # Patch for Sphinx complaint
value = value * mask
device = value.device
dtype = value.dtype
value = value.cpu().detach().numpy()
mask = mask.cpu().detach().numpy().astype(bool)
b, t_x, t_y = value.shape
direction = np.zeros(value.shape, dtype=np.int64)
v = np.zeros((b, t_x), dtype=np.float32)
x_range = np.arange(t_x, dtype=np.float32).reshape(1, -1)
for j in range(t_y):
v0 = np.pad(v, [[0, 0], [1, 0]], mode="constant", constant_values=max_neg_val)[:, :-1]
v1 = v
max_mask = v1 >= v0
v_max = np.where(max_mask, v1, v0)
direction[:, :, j] = max_mask
index_mask = x_range <= j
v = np.where(index_mask, v_max + value[:, :, j], max_neg_val)
direction = np.where(mask, direction, 1)
path = np.zeros(value.shape, dtype=np.float32)
index = mask[:, :, 0].sum(1).astype(np.int64) - 1
index_range = np.arange(b)
for j in reversed(range(t_y)):
path[index_range, index, j] = 1
index = index + direction[index_range, index, j] - 1
path = path * mask.astype(np.float32)
path = torch.from_numpy(path).to(device=device, dtype=dtype)
return path
def beta_binomial_prior_distribution(phoneme_count, mel_count, scaling_factor=1.0):
P, M = phoneme_count, mel_count
x = np.arange(0, P)
mel_text_probs = []
for i in range(1, M + 1):
a, b = scaling_factor * i, scaling_factor * (M + 1 - i)
rv = betabinom(P, a, b)
mel_i_prob = rv.pmf(x)
mel_text_probs.append(mel_i_prob)
return np.array(mel_text_probs)
def compute_attn_prior(x_len, y_len, scaling_factor=1.0):
"""Compute attention priors for the alignment network."""
attn_prior = beta_binomial_prior_distribution(
x_len,
y_len,
scaling_factor,
)
return attn_prior # [y_len, x_len]
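# Hedged usage sketch (illustrative comment only): a soft diagonal prior for
# aligning 5 phonemes with 40 mel frames.
#
#   prior = compute_attn_prior(x_len=5, y_len=40)
#   # prior.shape -> (40, 5); row i is a beta-binomial pmf over phoneme positions.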
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/far/far-class.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/extensions/far/far-class.h>
#include <fst/script/script-impl.h>
#include <fst/extensions/far/script-impl.h>
namespace fst {
namespace script {
// FarReaderClass.
FarReaderClass *FarReaderClass::Open(const string &filename) {
OpenFarReaderClassArgs1 args(filename);
args.retval = nullptr;
Apply<Operation<OpenFarReaderClassArgs1>>("OpenFarReaderClass",
LoadArcTypeFromFar(filename),
&args);
return args.retval;
}
FarReaderClass *FarReaderClass::Open(const std::vector<string> &filenames) {
if (filenames.empty()) {
LOG(ERROR) << "FarReaderClass::Open: No files specified";
return nullptr;
}
auto it = filenames.cbegin();
const auto arc_type = LoadArcTypeFromFar(*it);
if (arc_type.empty()) return nullptr;
// FIXME(kbg): Is any of this really necessary? I am doing this purely
// to conform to what I did with fst::script::Replace.
++it;
for (; it != filenames.cend(); ++it) {
const string other_arc_type = LoadArcTypeFromFar(*it);
if (other_arc_type.empty()) return nullptr;
if (arc_type != other_arc_type) {
LOG(ERROR) << "FarReaderClass::Open: Trying to open FARs with "
<< "non-matching arc types:\n\t" << arc_type << " and "
<< other_arc_type;
return nullptr;
}
}
OpenFarReaderClassArgs2 args(filenames);
args.retval = nullptr;
Apply<Operation<OpenFarReaderClassArgs2>>("OpenFarReaderClass", arc_type,
&args);
return args.retval;
}
REGISTER_FST_OPERATION(OpenFarReaderClass, StdArc, OpenFarReaderClassArgs1);
REGISTER_FST_OPERATION(OpenFarReaderClass, LogArc, OpenFarReaderClassArgs1);
REGISTER_FST_OPERATION(OpenFarReaderClass, Log64Arc, OpenFarReaderClassArgs1);
REGISTER_FST_OPERATION(OpenFarReaderClass, StdArc, OpenFarReaderClassArgs2);
REGISTER_FST_OPERATION(OpenFarReaderClass, LogArc, OpenFarReaderClassArgs2);
REGISTER_FST_OPERATION(OpenFarReaderClass, Log64Arc, OpenFarReaderClassArgs2);
// FarWriterClass.
FarWriterClass *FarWriterClass::Create(const string &filename,
const string &arc_type, FarType type) {
CreateFarWriterClassInnerArgs iargs(filename, type);
CreateFarWriterClassArgs args(iargs);
args.retval = nullptr;
Apply<Operation<CreateFarWriterClassArgs>>("CreateFarWriterClass", arc_type,
&args);
return args.retval;
}
REGISTER_FST_OPERATION(CreateFarWriterClass, StdArc, CreateFarWriterClassArgs);
REGISTER_FST_OPERATION(CreateFarWriterClass, LogArc, CreateFarWriterClassArgs);
REGISTER_FST_OPERATION(CreateFarWriterClass, Log64Arc,
CreateFarWriterClassArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT-models/dhivehi/itml | coqui_public_repos/STT-models/dhivehi/itml/v0.1.1/MODEL_CARD.md | # Model card for Dhivehi STT
Jump to section:
- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)
## Model details
- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Dhivehi / ދިވެހި / `dv`
- Model date: April 26, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.1`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{dhivehi-stt, author = {Tyers,Francis}, title = {Dhivehi STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-DV-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).
## Intended use
Speech-to-Text for the [Dhivehi Language](https://en.wikipedia.org/wiki/Dhivehi_language) on 16kHz, mono-channel audio.
## Performance Factors
Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
## Metrics
STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.
#### Transcription Accuracy
The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/cv/).
|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|91.2\%|29.3\%|
#### Real-Time Factor
Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. For example, transcribing 10 seconds of audio in 2 seconds of processing time gives an RTF of 0.2. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.
Recorded average RTF on laptop CPU: ``
#### Model Size
`model.pbmm`: 181M
`model.tflite`: 46M
### Approaches to uncertainty and variability
Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.
## Training data
This model was trained on Common Voice 6.1 train.
## Evaluation data
The Model was evaluated on Common Voice 6.1 test.
## Ethical considerations
Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.
### Demographic Bias
You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.
### Surveillance
Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.
## Caveats and recommendations
Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
| 0 |
coqui_public_repos/STT/native_client/wasm | coqui_public_repos/STT/native_client/wasm/test/package.json.in | {
"name": "stt-wasm-test",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "jest"
},
"jest": {
"testEnvironment": "node",
"transform": {
"\\.js$": "babel-jest"
},
"transformIgnorePatterns": []
},
"author": "",
"license": "ISC",
"devDependencies": {
"jest": "28.1.3",
"babel-plugin-transform-import-meta": "^2.2.0",
"@babel/preset-env": "7.18.9"
}
}
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/examples-vad_transcriber-py35.yml | build:
template_file: examples-base.tyml
docker_image: "python:3.5"
dependencies:
- "linux-amd64-cpu-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/examples/vad_transcriber/test.sh 3.5.0:m"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech examples: VAD transcriber Py3.5"
description: "DeepSpeech examples: VAD transcriber streaming Python 3.5"
| 0 |
coqui_public_repos/STT-models/totonac/bozden | coqui_public_repos/STT-models/totonac/bozden/v1.0.0/alphabet.txt | # Each line in this file represents the Unicode codepoint (UTF-8 encoded)
# associated with a numeric index.
# A line that starts with # is a comment. You can escape it with \# if you wish
# to use '#' in the Alphabet.
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
á
é
í
ñ
ó
ú
ʼ
# The last (non-comment) line needs to end with a newline.
| 0 |
coqui_public_repos/STT-models/finnish/itml | coqui_public_repos/STT-models/finnish/itml/v0.1.0/MODEL_CARD.md | # Model card for Finnish STT
Jump to section:
- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)
## Model details
- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Finnish / Suomi / `fi`
- Model date: April 9, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.0`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{finnish-stt, author = {Tyers,Francis}, title = {Finnish STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-FI-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).
## Intended use
Speech-to-Text for the [Finnish Language](https://en.wikipedia.org/wiki/Finnish_language) on 16kHz, mono-channel audio.
## Performance Factors
Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
## Metrics
STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.
#### Transcription Accuracy
The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/cv/).
|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|99.7\%|39.1\%|
#### Real-Time Factor
Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.
Recorded average RTF on laptop CPU: ``
#### Model Size
`model.pbmm`: 181M
`model.tflite`: 46M
### Approaches to uncertainty and variability
Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.
## Training data
This model was trained on Common Voice 6.1 train.
## Evaluation data
The Model was evaluated on Common Voice 6.1 test.
## Ethical considerations
Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.
### Demographic Bias
You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.
### Surveillance
Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.
## Caveats and recommendations
Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-generate_scorer-android-24-arm64-opt.yml | build:
template_file: test-android-opt-base.tyml
dependencies:
- "android-arm64-cpu-opt"
- "kenlm_android-arm64-cpu-opt"
- "android-cache-arm64-v8a-android-24"
cache:
url: ${system.android_cache.arm64_v8a.android_24.url}
namespace: ${system.android_cache.arm64_v8a.android_24.namespace}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-scorer-tests.sh ${system.kenlm.android_arm64_cpu.url} android arm64-v8a android-24"
workerType: "${docker.dsTests}"
metadata:
name: "Testing DeepSpeech Android 7.0 ARM64 CPU generate scorer"
description: "Generate a DeepSpeech Scorer for Android 7.0/ARM64, CPU only, optimized version"
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/lm/trie_sort.cc | #include "lm/trie_sort.hh"
#include "lm/config.hh"
#include "lm/lm_exception.hh"
#include "lm/read_arpa.hh"
#include "lm/vocab.hh"
#include "lm/weights.hh"
#include "lm/word_index.hh"
#include "util/file_piece.hh"
#include "util/mmap.hh"
#include "util/pool.hh"
#include "util/proxy_iterator.hh"
#include "util/sized_iterator.hh"
#include <algorithm>
#include <cstring>
#include <cstdio>
#include <cstdlib>
#include <deque>
#include <iterator>
#include <limits>
#include <vector>
namespace lm {
namespace ngram {
namespace trie {
namespace {
typedef util::SizedIterator NGramIter;
// Proxy for an entry except there is some extra cruft between the entries. This is used to sort (n-1)-grams using the same memory as the sorted n-grams.
class PartialViewProxy {
public:
PartialViewProxy() : attention_size_(0), inner_() {}
PartialViewProxy(void *ptr, std::size_t block_size, util::FreePool &pool) : attention_size_(pool.ElementSize()), inner_(ptr, block_size), pool_(&pool) {}
operator util::ValueBlock() const {
return util::ValueBlock(inner_.Data(), *pool_);
}
PartialViewProxy &operator=(const PartialViewProxy &from) {
memcpy(inner_.Data(), from.inner_.Data(), attention_size_);
return *this;
}
PartialViewProxy &operator=(const util::ValueBlock &from) {
memcpy(inner_.Data(), from.Data(), attention_size_);
return *this;
}
const void *Data() const { return inner_.Data(); }
void *Data() { return inner_.Data(); }
friend void swap(PartialViewProxy first, PartialViewProxy second);
private:
friend class util::ProxyIterator<PartialViewProxy>;
typedef util::ValueBlock value_type;
const std::size_t attention_size_;
typedef util::SizedInnerIterator InnerIterator;
InnerIterator &Inner() { return inner_; }
const InnerIterator &Inner() const { return inner_; }
InnerIterator inner_;
util::FreePool *pool_;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
void swap(PartialViewProxy first, PartialViewProxy second) {
std::swap_ranges(reinterpret_cast<char*>(first.Data()), reinterpret_cast<char*>(first.Data()) + first.attention_size_, reinterpret_cast<char*>(second.Data()));
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
typedef util::ProxyIterator<PartialViewProxy> PartialIter;
FILE *DiskFlush(const void *mem_begin, const void *mem_end, const std::string &temp_prefix) {
util::scoped_fd file(util::MakeTemp(temp_prefix));
util::WriteOrThrow(file.get(), mem_begin, (uint8_t*)mem_end - (uint8_t*)mem_begin);
return util::FDOpenOrThrow(file);
}
FILE *WriteContextFile(uint8_t *begin, uint8_t *end, const std::string &temp_prefix, std::size_t entry_size, unsigned char order) {
const size_t context_size = sizeof(WordIndex) * (order - 1);
util::FreePool pool(context_size);
// Sort just the contexts using the same memory.
PartialIter context_begin(PartialViewProxy(begin + sizeof(WordIndex), entry_size, pool));
PartialIter context_end(PartialViewProxy(end + sizeof(WordIndex), entry_size, pool));
#if defined(_WIN32) || defined(_WIN64)
std::stable_sort
#else
std::sort
#endif
(context_begin, context_end, util::SizedCompare<EntryCompare, PartialViewProxy>(EntryCompare(order - 1)));
util::scoped_FILE out(util::FMakeTemp(temp_prefix));
  // Write out to file and uniquify at the same time. Could have used unique_copy if there was an appropriate OutputIterator.
if (context_begin == context_end) return out.release();
PartialIter i(context_begin);
util::WriteOrThrow(out.get(), i->Data(), context_size);
const void *previous = i->Data();
++i;
for (; i != context_end; ++i) {
if (memcmp(previous, i->Data(), context_size)) {
util::WriteOrThrow(out.get(), i->Data(), context_size);
previous = i->Data();
}
}
return out.release();
}
struct ThrowCombine {
void operator()(std::size_t entry_size, unsigned char order, const void *first, const void *second, FILE * /*out*/) const {
const WordIndex *base = reinterpret_cast<const WordIndex*>(first);
FormatLoadException e;
e << "Duplicate n-gram detected with vocab ids";
for (const WordIndex *i = base; i != base + order; ++i) {
e << ' ' << *i;
}
throw e;
}
};
// Useful for context files that just contain records with no value.
struct FirstCombine {
void operator()(std::size_t entry_size, unsigned char /*order*/, const void *first, const void * /*second*/, FILE *out) const {
util::WriteOrThrow(out, first, entry_size);
}
};
template <class Combine> FILE *MergeSortedFiles(FILE *first_file, FILE *second_file, const std::string &temp_prefix, std::size_t weights_size, unsigned char order, const Combine &combine) {
std::size_t entry_size = sizeof(WordIndex) * order + weights_size;
RecordReader first, second;
first.Init(first_file, entry_size);
second.Init(second_file, entry_size);
util::scoped_FILE out_file(util::FMakeTemp(temp_prefix));
EntryCompare less(order);
while (first && second) {
if (less(first.Data(), second.Data())) {
util::WriteOrThrow(out_file.get(), first.Data(), entry_size);
++first;
} else if (less(second.Data(), first.Data())) {
util::WriteOrThrow(out_file.get(), second.Data(), entry_size);
++second;
} else {
combine(entry_size, order, first.Data(), second.Data(), out_file.get());
++first; ++second;
}
}
for (RecordReader &remains = (first ? first : second); remains; ++remains) {
util::WriteOrThrow(out_file.get(), remains.Data(), entry_size);
}
return out_file.release();
}
} // namespace
void RecordReader::Init(FILE *file, std::size_t entry_size) {
entry_size_ = entry_size;
data_.reset(malloc(entry_size));
UTIL_THROW_IF(!data_.get(), util::ErrnoException, "Failed to malloc read buffer");
file_ = file;
if (file) {
rewind(file);
remains_ = true;
++*this;
} else {
remains_ = false;
}
}
void RecordReader::Overwrite(const void *start, std::size_t amount) {
long internal = (uint8_t*)start - (uint8_t*)data_.get();
UTIL_THROW_IF(fseek(file_, internal - entry_size_, SEEK_CUR), util::ErrnoException, "Couldn't seek backwards for revision");
util::WriteOrThrow(file_, start, amount);
long forward = entry_size_ - internal - amount;
#if !defined(_WIN32) && !defined(_WIN64)
if (forward)
#endif
UTIL_THROW_IF(fseek(file_, forward, SEEK_CUR), util::ErrnoException, "Couldn't seek forwards past revision");
}
void RecordReader::Rewind() {
if (file_) {
rewind(file_);
remains_ = true;
++*this;
} else {
remains_ = false;
}
}
SortedFiles::SortedFiles(const Config &config, util::FilePiece &f, std::vector<uint64_t> &counts, size_t buffer, const std::string &file_prefix, SortedVocabulary &vocab) {
PositiveProbWarn warn(config.positive_log_probability);
unigram_.reset(util::MakeTemp(file_prefix));
{
// In case <unk> appears.
size_t size_out = (counts[0] + 1) * sizeof(ProbBackoff);
util::scoped_mmap unigram_mmap(util::MapZeroedWrite(unigram_.get(), size_out), size_out);
Read1Grams(f, counts[0], vocab, reinterpret_cast<ProbBackoff*>(unigram_mmap.get()), warn);
CheckSpecials(config, vocab);
if (!vocab.SawUnk()) ++counts[0];
}
// Only use as much buffer as we need.
size_t buffer_use = 0;
for (unsigned int order = 2; order < counts.size(); ++order) {
buffer_use = std::max<size_t>(buffer_use, static_cast<size_t>((sizeof(WordIndex) * order + 2 * sizeof(float)) * counts[order - 1]));
}
buffer_use = std::max<size_t>(buffer_use, static_cast<size_t>((sizeof(WordIndex) * counts.size() + sizeof(float)) * counts.back()));
buffer = std::min<size_t>(buffer, buffer_use);
util::scoped_malloc mem;
mem.reset(malloc(buffer));
if (!mem.get()) UTIL_THROW(util::ErrnoException, "malloc failed for sort buffer size " << buffer);
for (unsigned char order = 2; order <= counts.size(); ++order) {
ConvertToSorted(f, vocab, counts, file_prefix, order, warn, mem.get(), buffer);
}
ReadEnd(f);
}
namespace {
class Closer {
public:
explicit Closer(std::deque<FILE*> &files) : files_(files) {}
~Closer() {
for (std::deque<FILE*>::iterator i = files_.begin(); i != files_.end(); ++i) {
util::scoped_FILE deleter(*i);
}
}
void PopFront() {
util::scoped_FILE deleter(files_.front());
files_.pop_front();
}
private:
std::deque<FILE*> &files_;
};
} // namespace
void SortedFiles::ConvertToSorted(util::FilePiece &f, const SortedVocabulary &vocab, const std::vector<uint64_t> &counts, const std::string &file_prefix, unsigned char order, PositiveProbWarn &warn, void *mem, std::size_t mem_size) {
ReadNGramHeader(f, order);
const size_t count = counts[order - 1];
// Size of weights. Does it include backoff?
const size_t words_size = sizeof(WordIndex) * order;
const size_t weights_size = sizeof(float) + ((order == counts.size()) ? 0 : sizeof(float));
const size_t entry_size = words_size + weights_size;
const size_t batch_size = std::min(count, mem_size / entry_size);
uint8_t *const begin = reinterpret_cast<uint8_t*>(mem);
std::deque<FILE*> files, contexts;
Closer files_closer(files), contexts_closer(contexts);
for (std::size_t batch = 0, done = 0; done < count; ++batch) {
uint8_t *out = begin;
uint8_t *out_end = out + std::min(count - done, batch_size) * entry_size;
if (order == counts.size()) {
for (; out != out_end; out += entry_size) {
std::reverse_iterator<WordIndex*> it(reinterpret_cast<WordIndex*>(out) + order);
ReadNGram(f, order, vocab, it, *reinterpret_cast<Prob*>(out + words_size), warn);
}
} else {
for (; out != out_end; out += entry_size) {
std::reverse_iterator<WordIndex*> it(reinterpret_cast<WordIndex*>(out) + order);
ReadNGram(f, order, vocab, it, *reinterpret_cast<ProbBackoff*>(out + words_size), warn);
}
}
// Sort full records by full n-gram.
util::SizedSort(begin, out_end, entry_size, EntryCompare(order));
files.push_back(DiskFlush(begin, out_end, file_prefix));
contexts.push_back(WriteContextFile(begin, out_end, file_prefix, entry_size, order));
done += (out_end - begin) / entry_size;
}
// All individual files created. Merge them.
while (files.size() > 1) {
files.push_back(MergeSortedFiles(files[0], files[1], file_prefix, weights_size, order, ThrowCombine()));
files_closer.PopFront();
files_closer.PopFront();
contexts.push_back(MergeSortedFiles(contexts[0], contexts[1], file_prefix, 0, order - 1, FirstCombine()));
contexts_closer.PopFront();
contexts_closer.PopFront();
}
if (!files.empty()) {
// Steal from closers.
full_[order - 2].reset(files.front());
files.pop_front();
context_[order - 2].reset(contexts.front());
contexts.pop_front();
}
}
} // namespace trie
} // namespace ngram
} // namespace lm
| 0 |
coqui_public_repos/snakepit/src | coqui_public_repos/snakepit/src/models/Job-model.js | const assign = require('assign-deep')
const Sequelize = require('sequelize')
const sequelize = require('./db.js')
const Pit = require('./Pit-model.js')
const Group = require('./Group-model.js')
const User = require('./User-model.js')
const State = require('./State-model.js')
const ProcessGroup = require('./ProcessGroup-model.js')
const Process = require('./Process-model.js')
const Allocation = require('./Allocation-model.js')
const log = require('../utils/logger.js')
var Job = sequelize.define('job', {
id: { type: Sequelize.INTEGER, primaryKey: true },
description: { type: Sequelize.STRING, allowNull: false },
provisioning: { type: Sequelize.STRING, allowNull: false },
request: { type: Sequelize.STRING, allowNull: false },
image: { type: Sequelize.STRING, allowNull: true },
state: { type: Sequelize.INTEGER, allowNull: true },
since: { type: Sequelize.DATE, allowNull: true },
rank: { type: Sequelize.INTEGER, allowNull: false, defaultValue: 0 },
allocation: { type: Sequelize.STRING, allowNull: true },
continues: { type: Sequelize.INTEGER, allowNull: true }
})
Job.jobStates = {
NEW: 0,
PREPARING: 1,
WAITING: 2,
STARTING: 3,
RUNNING: 4,
STOPPING: 5,
CLEANING: 6,
DONE: 7
}
Job.hasMany(State, { onDelete: 'cascade' })
State.belongsTo(Job)
Job.hasMany(ProcessGroup, { onDelete: 'cascade' })
ProcessGroup.belongsTo(Job)
Job.belongsTo(Pit, { foreignKey: 'id', onDelete: 'cascade' })
Job.belongsTo(User, { constraints: false })
var JobGroup = Job.JobGroup = sequelize.define('jobgroup', {
jobId: { type: Sequelize.INTEGER, unique: 'pk' },
groupId: { type: Sequelize.STRING, unique: 'pk' }
})
Job.hasMany(JobGroup, { onDelete: 'cascade' })
Group.hasMany(JobGroup, { onDelete: 'cascade' })
JobGroup.belongsTo(Job)
JobGroup.belongsTo(Group)
User.prototype.canAccessJob = async function (job) {
if (this.admin || job.userId == this.id) {
return true
}
return await Job.findOne({
where: { id: job.id, '$jobgroups->group->usergroups.userId$': this.id },
include: [
{
model: JobGroup,
                required: true,
include: [
{
model: Group,
                        required: true,
include: [
{
model: User.UserGroup,
                                required: true
}
]
}
]
}
]
})
}
Job.getDir = (jobId) => Pit.getDir(jobId)
Job.prototype.getDir = function () {
return Pit.getDir(this.id)
}
Job.getDirExternal = (jobId) => Pit.getDirExternal(jobId)
Job.prototype.getDirExternal = function () {
return Pit.getDirExternal(this.id)
}
Job.prototype.setState = async function (state, reason) {
if (this.state == state) {
return
}
let t
try {
t = await sequelize.transaction({ type: Sequelize.Transaction.TYPES.EXCLUSIVE })
if (this.state != Job.jobStates.WAITING && state == Job.jobStates.WAITING) {
this.rank = ((await Job.max('rank', { where: { state: Job.jobStates.WAITING } })) || 0) + 1
} else if (this.state == Job.jobStates.WAITING && state != Job.jobStates.WAITING) {
await Job.update(
{ rank: Sequelize.literal('rank - 1') },
{
where: {
state: Job.jobStates.WAITING,
rank: { [Sequelize.Op.gt]: this.rank }
},
transaction: t,
lock: t.LOCK
}
)
this.rank = 0
}
this.state = state
this.since = Date.now()
await this.save({ transaction: t, lock: t.LOCK })
await State.create({ jobId: this.id, state: state, since: Date.now(), reason: reason })
await t.commit()
} catch (err) {
await t.rollback()
throw err
}
}
Job.infoQuery = options => assign({
subQuery: false,
include: [
{
model: ProcessGroup,
            required: false,
attributes: [],
include: [
{
model: Process,
                    required: false,
attributes: [],
include:
[
{
model: Allocation,
                            required: false,
attributes: []
}
]
}
]
}
],
group: [
'job.id'
],
attributes: {
include: [
[sequelize.fn('sum', sequelize.col('processgroups->processes->allocations.samples')), 'samples'],
[sequelize.fn('sum', sequelize.col('processgroups->processes->allocations.acompute')), 'aggcompute'],
[sequelize.fn('sum', sequelize.col('processgroups->processes->allocations.amemory')), 'aggmemory'],
[sequelize.fn('avg', sequelize.col('processgroups->processes->allocations.ccompute')), 'curcompute'],
[sequelize.fn('avg', sequelize.col('processgroups->processes->allocations.cmemory')), 'curmemory']
]
}
}, options || {})
Allocation.activeQuery = {
include: [
{
model: Process,
            required: true,
attributes: [],
include: [
{
model: ProcessGroup,
                    required: true,
attributes: [],
include: [
{
model: Job,
                            required: true,
attributes: []
}
]
}
]
}
],
where: {
'$process->processgroup->job.state$': {
[Sequelize.Op.gte]: Job.jobStates.STARTING,
[Sequelize.Op.lte]: Job.jobStates.STOPPING
}
}
}
module.exports = Job
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/compress/Makefile.in | # Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
@HAVE_BIN_TRUE@bin_PROGRAMS = fstcompress$(EXEEXT) fstrandmod$(EXEEXT)
subdir = src/extensions/compress
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/depcomp
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"
LTLIBRARIES = $(lib_LTLIBRARIES)
am__DEPENDENCIES_1 =
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_DEPENDENCIES = \
@HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \
@HAVE_SCRIPT_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
am__libfstcompressscript_la_SOURCES_DIST = compress-script.cc
@HAVE_SCRIPT_TRUE@am_libfstcompressscript_la_OBJECTS = \
@HAVE_SCRIPT_TRUE@ compress-script.lo
libfstcompressscript_la_OBJECTS = \
$(am_libfstcompressscript_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
libfstcompressscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(libfstcompressscript_la_LDFLAGS) \
$(LDFLAGS) -o $@
@HAVE_SCRIPT_TRUE@am_libfstcompressscript_la_rpath = -rpath $(libdir)
PROGRAMS = $(bin_PROGRAMS)
am__fstcompress_SOURCES_DIST = fstcompress.cc
@HAVE_BIN_TRUE@am_fstcompress_OBJECTS = fstcompress.$(OBJEXT)
fstcompress_OBJECTS = $(am_fstcompress_OBJECTS)
fstcompress_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstcompress_DEPENDENCIES = libfstcompressscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
am__fstrandmod_SOURCES_DIST = fstrandmod.cc
@HAVE_BIN_TRUE@am_fstrandmod_OBJECTS = fstrandmod.$(OBJEXT)
fstrandmod_OBJECTS = $(am_fstrandmod_OBJECTS)
fstrandmod_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstrandmod_DEPENDENCIES = libfstcompressscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES =
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CXXFLAGS) $(CXXFLAGS)
AM_V_CXX = $(am__v_CXX_@AM_V@)
am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
am__v_CXX_0 = @echo " CXX " $@;
am__v_CXX_1 =
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
am__v_CXXLD_0 = @echo " CXXLD " $@;
am__v_CXXLD_1 =
SOURCES = $(libfstcompressscript_la_SOURCES) $(fstcompress_SOURCES) \
$(fstrandmod_SOURCES)
DIST_SOURCES = $(am__libfstcompressscript_la_SOURCES_DIST) \
$(am__fstcompress_SOURCES_DIST) $(am__fstrandmod_SOURCES_DIST)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS)
@HAVE_BIN_TRUE@LDADD = libfstcompressscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la \
@HAVE_BIN_TRUE@ -lm $(DL_LIBS)
@HAVE_BIN_TRUE@fstcompress_SOURCES = fstcompress.cc
@HAVE_BIN_TRUE@fstrandmod_SOURCES = fstrandmod.cc
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_SOURCES = compress-script.cc
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_LDFLAGS = -version-info 10:0:0
@HAVE_SCRIPT_TRUE@libfstcompressscript_la_LIBADD = \
@HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \
@HAVE_SCRIPT_TRUE@ ../../lib/libfst.la -lz -lm $(DL_LIBS)
@HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstcompressscript.la
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/compress/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/extensions/compress/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
}
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
done
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
@list='$(lib_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
libfstcompressscript.la: $(libfstcompressscript_la_OBJECTS) $(libfstcompressscript_la_DEPENDENCIES) $(EXTRA_libfstcompressscript_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libfstcompressscript_la_LINK) $(am_libfstcompressscript_la_rpath) $(libfstcompressscript_la_OBJECTS) $(libfstcompressscript_la_LIBADD) $(LIBS)
install-binPROGRAMS: $(bin_PROGRAMS)
@$(NORMAL_INSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
$(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
fi; \
for p in $$list; do echo "$$p $$p"; done | \
sed 's/$(EXEEXT)$$//' | \
while read p p1; do if test -f $$p \
|| test -f $$p1 \
; then echo "$$p"; echo "$$p"; else :; fi; \
done | \
sed -e 'p;s,.*/,,;n;h' \
-e 's|.*|.|' \
-e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
{ d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
if ($$2 == $$4) files[d] = files[d] " " $$1; \
else { print "f", $$3 "/" $$4, $$1; } } \
END { for (d in files) print "f", d, files[d] }' | \
while read type dir files; do \
if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
test -z "$$files" || { \
echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
$(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
} \
; done
uninstall-binPROGRAMS:
@$(NORMAL_UNINSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
-e 's/$$/$(EXEEXT)/' \
`; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(bindir)" && rm -f $$files
clean-binPROGRAMS:
@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
echo " rm -f" $$list; \
rm -f $$list || exit $$?; \
test -n "$(EXEEXT)" || exit 0; \
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
fstcompress$(EXEEXT): $(fstcompress_OBJECTS) $(fstcompress_DEPENDENCIES) $(EXTRA_fstcompress_DEPENDENCIES)
@rm -f fstcompress$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstcompress_OBJECTS) $(fstcompress_LDADD) $(LIBS)
fstrandmod$(EXEEXT): $(fstrandmod_OBJECTS) $(fstrandmod_DEPENDENCIES) $(EXTRA_fstrandmod_DEPENDENCIES)
@rm -f fstrandmod$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstrandmod_OBJECTS) $(fstrandmod_LDADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compress-script.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstcompress.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstrandmod.Po@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
.cc.obj:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.cc.lo:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
install-binPROGRAMS: install-libLTLIBRARIES
installdirs:
for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libtool mostlyclean-am
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am:
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am: install-binPROGRAMS install-libLTLIBRARIES
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libtool cscopelist-am ctags ctags-am distclean \
distclean-compile distclean-generic distclean-libtool \
distclean-tags distdir dvi dvi-am html html-am info info-am \
install install-am install-binPROGRAMS install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-libLTLIBRARIES install-man install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \
uninstall-libLTLIBRARIES
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-python_39_8k-linux-amd64-prod_pbmodel-opt.yml | build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python-tests-prod.sh 3.9.0: 8k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU Python v3.9 prod tests (8kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on Python v3.9 on prod model, CPU only, optimized version (8kHz)"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/interval-set.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to represent and operate on sets of intervals.
#ifndef FST_INTERVAL_SET_H_
#define FST_INTERVAL_SET_H_
#include <algorithm>
#include <iostream>
#include <vector>
#include <fst/util.h>
namespace fst {
// Half-open integral interval [a, b) of signed integers of type T.
template <class T>
struct IntInterval {
T begin;
T end;
IntInterval() : begin(-1), end(-1) {}
IntInterval(T begin, T end) : begin(begin), end(end) {}
bool operator<(const IntInterval<T> &i) const {
return begin < i.begin || (begin == i.begin && end > i.end);
}
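  // Ordering note: ties on begin are broken by the larger end, e.g. [2,8)
  // sorts before [2,5); this lets Normalize() absorb contained intervals
  // in a single forward pass.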
bool operator==(const IntInterval<T> &i) const {
return begin == i.begin && end == i.end;
}
bool operator!=(const IntInterval<T> &i) const {
return begin != i.begin || end != i.end;
}
std::istream &Read(std::istream &strm) {
T n;
ReadType(strm, &n);
begin = n;
ReadType(strm, &n);
end = n;
return strm;
}
std::ostream &Write(std::ostream &strm) const {
T n = begin;
WriteType(strm, n);
n = end;
WriteType(strm, n);
return strm;
}
};
// Stores IntIntervals<T> in a vector. In addition, keeps the count of points in
// all intervals.
template <class T>
class VectorIntervalStore {
public:
using Interval = IntInterval<T>;
using Iterator = typename std::vector<Interval>::const_iterator;
VectorIntervalStore() : count_(-1) {}
std::vector<Interval> *MutableIntervals() { return &intervals_; }
const Interval *Intervals() const { return intervals_.data(); }
T Size() const { return intervals_.size(); }
T Count() const { return count_; }
void SetCount(T count) { count_ = count; }
void Clear() {
intervals_.clear();
count_ = 0;
}
Iterator begin() const { return intervals_.begin(); }
Iterator end() const { return intervals_.end(); }
std::istream &Read(std::istream &strm) {
ReadType(strm, &intervals_);
return ReadType(strm, &count_);
}
std::ostream &Write(std::ostream &strm) const {
WriteType(strm, intervals_);
return WriteType(strm, count_);
}
private:
std::vector<Interval> intervals_;
T count_;
};
// Stores and operates on a set of half-open integral intervals [a, b)
// of signed integers of type T.
template <class T, class Store = VectorIntervalStore<T>>
class IntervalSet {
public:
using Interval = IntInterval<T>;
template <class... A>
explicit IntervalSet(A... args) : intervals_(args...) {}
// Returns the interval set as a vector.
std::vector<Interval> *MutableIntervals() {
return intervals_.MutableIntervals();
}
// Returns a pointer to an array of Size() elements.
const Interval *Intervals() const { return intervals_.Intervals(); }
bool Empty() const { return Size() == 0; }
T Size() const { return intervals_.Size(); }
// Number of points in the intervals (undefined if not normalized).
T Count() const { return intervals_.Count(); }
void Clear() { intervals_.Clear(); }
// Adds an interval set to the set. The result may not be normalized.
void Union(const IntervalSet<T, Store> &iset) {
intervals_.MutableIntervals()->insert(intervals_.MutableIntervals()->end(),
iset.intervals_.begin(),
iset.intervals_.end());
}
// Requires intervals be normalized.
bool Member(T value) const {
const Interval interval(value, value);
auto lb = std::lower_bound(intervals_.begin(), intervals_.end(), interval);
if (lb == intervals_.begin()) return false;
return (--lb)->end > value;
}
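  // Illustration: with normalized intervals {[0,2), [5,9)}, Member(1) and
  // Member(5) are true, while Member(2) is false (intervals are half-open).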
// Requires intervals be normalized.
bool operator==(const IntervalSet<T, Store> &iset) const {
return Size() == iset.Size() &&
std::equal(intervals_.begin(), intervals_.end(),
iset.intervals_.begin());
}
// Requires intervals be normalized.
bool operator!=(const IntervalSet<T, Store> &iset) const {
return Size() != iset.Size() ||
!std::equal(intervals_.begin(), intervals_.end(),
iset.intervals_.begin());
}
bool Singleton() const {
return Size() == 1 &&
intervals_.begin()->begin + 1 == intervals_.begin()->end;
}
  // Sorts, collapses overlapping and adjacent intervals, and sets count.
void Normalize();
// Intersects an interval set with the set. Requires intervals be normalized.
// The result is normalized.
void Intersect(const IntervalSet<T, Store> &iset,
IntervalSet<T, Store> *oset) const;
// Complements the set w.r.t [0, maxval). Requires intervals be normalized.
// The result is normalized.
void Complement(T maxval, IntervalSet<T, Store> *oset) const;
// Subtract an interval set from the set. Requires intervals be normalized.
// The result is normalized.
void Difference(const IntervalSet<T, Store> &iset,
IntervalSet<T, Store> *oset) const;
// Determines if an interval set overlaps with the set. Requires intervals be
// normalized.
bool Overlaps(const IntervalSet<T, Store> &iset) const;
// Determines if an interval set overlaps with the set but neither is
// contained in the other. Requires intervals be normalized.
bool StrictlyOverlaps(const IntervalSet<T, Store> &iset) const;
// Determines if an interval set is contained within the set. Requires
// intervals be normalized.
bool Contains(const IntervalSet<T, Store> &iset) const;
std::istream &Read(std::istream &strm) { return intervals_.Read(strm); }
std::ostream &Write(std::ostream &strm) const {
return intervals_.Write(strm);
}
typename Store::Iterator begin() const { return intervals_.begin(); }
typename Store::Iterator end() const { return intervals_.end(); }
private:
Store intervals_;
};
// Sorts, collapses overlapping and adjacent intervals, and sets count.
template <typename T, class Store>
void IntervalSet<T, Store>::Normalize() {
auto &intervals = *intervals_.MutableIntervals();
std::sort(intervals.begin(), intervals.end());
T count = 0;
T size = 0;
for (T i = 0; i < intervals.size(); ++i) {
auto &inti = intervals[i];
if (inti.begin == inti.end) continue;
for (T j = i + 1; j < intervals.size(); ++j) {
auto &intj = intervals[j];
if (intj.begin > inti.end) break;
if (intj.end > inti.end) inti.end = intj.end;
++i;
}
count += inti.end - inti.begin;
intervals[size++] = inti;
}
intervals.resize(size);
intervals_.SetCount(count);
}
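// Worked example: {[3,5), [0,2), [1,4)} normalizes to {[0,5)} with
// Count() == 5; empty intervals such as [2,2) are dropped.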
// Intersects an interval set with the set. Requires intervals be normalized.
// The result is normalized.
template <typename T, class Store>
void IntervalSet<T, Store>::Intersect(const IntervalSet<T, Store> &iset,
IntervalSet<T, Store> *oset) const {
auto *ointervals = oset->MutableIntervals();
auto it1 = intervals_.begin();
auto it2 = iset.intervals_.begin();
ointervals->clear();
T count = 0;
while (it1 != intervals_.end() && it2 != iset.intervals_.end()) {
if (it1->end <= it2->begin) {
++it1;
} else if (it2->end <= it1->begin) {
++it2;
} else {
ointervals->emplace_back(std::max(it1->begin, it2->begin),
std::min(it1->end, it2->end));
count += ointervals->back().end - ointervals->back().begin;
if (it1->end < it2->end) {
++it1;
} else {
++it2;
}
}
}
oset->intervals_.SetCount(count);
}
// Complements the set w.r.t [0, maxval). Requires intervals be normalized.
// The result is normalized.
template <typename T, class Store>
void IntervalSet<T, Store>::Complement(T maxval,
IntervalSet<T, Store> *oset) const {
auto *ointervals = oset->MutableIntervals();
ointervals->clear();
T count = 0;
Interval interval;
interval.begin = 0;
for (auto it = intervals_.begin(); it != intervals_.end(); ++it) {
interval.end = std::min(it->begin, maxval);
if ((interval.begin) < (interval.end)) {
ointervals->push_back(interval);
count += interval.end - interval.begin;
}
interval.begin = it->end;
}
interval.end = maxval;
if ((interval.begin) < (interval.end)) {
ointervals->push_back(interval);
count += interval.end - interval.begin;
}
oset->intervals_.SetCount(count);
}
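// Worked example: complementing {[1,3), [5,6)} w.r.t. maxval 8 yields
// {[0,1), [3,5), [6,8)} with Count() == 5.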
// Subtract an interval set from the set. Requires intervals be normalized.
// The result is normalized.
template <typename T, class Store>
void IntervalSet<T, Store>::Difference(const IntervalSet<T, Store> &iset,
IntervalSet<T, Store> *oset) const {
if (Empty()) {
oset->MutableIntervals()->clear();
oset->intervals_.SetCount(0);
} else {
IntervalSet<T, Store> cset;
iset.Complement(intervals_.Intervals()[intervals_.Size() - 1].end, &cset);
Intersect(cset, oset);
}
}
// Determines if an interval set overlaps with the set. Requires intervals be
// normalized.
template <typename T, class Store>
bool IntervalSet<T, Store>::Overlaps(const IntervalSet<T, Store> &iset) const {
auto it1 = intervals_.begin();
auto it2 = iset.intervals_.begin();
while (it1 != intervals_.end() && it2 != iset.intervals_.end()) {
if (it1->end <= it2->begin) {
++it1;
} else if (it2->end <= it1->begin) {
++it2;
} else {
return true;
}
}
return false;
}
// Determines if an interval set overlaps with the set but neither is contained
// in the other. Requires intervals be normalized.
template <typename T, class Store>
bool IntervalSet<T, Store>::StrictlyOverlaps(
const IntervalSet<T, Store> &iset) const {
auto it1 = intervals_.begin();
auto it2 = iset.intervals_.begin();
  bool only1 = false; // Point in intervals_ but not in iset.intervals_.
  bool only2 = false; // Point in iset.intervals_ but not in intervals_.
  bool overlap = false; // Point in both intervals_ and iset.intervals_.
while (it1 != intervals_.end() && it2 != iset.intervals_.end()) {
if (it1->end <= it2->begin) { // no overlap - it1 first
only1 = true;
++it1;
} else if (it2->end <= it1->begin) { // no overlap - it2 first
only2 = true;
++it2;
} else if (it2->begin == it1->begin && it2->end == it1->end) { // equals
overlap = true;
++it1;
++it2;
} else if (it2->begin <= it1->begin && it2->end >= it1->end) { // 1 c 2
only2 = true;
overlap = true;
++it1;
} else if (it1->begin <= it2->begin && it1->end >= it2->end) { // 2 c 1
only1 = true;
overlap = true;
++it2;
} else { // Strict overlap.
only1 = true;
only2 = true;
overlap = true;
}
if (only1 == true && only2 == true && overlap == true) return true;
}
if (it1 != intervals_.end()) only1 = true;
if (it2 != iset.intervals_.end()) only2 = true;
return only1 == true && only2 == true && overlap == true;
}
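// Worked example: {[0,5)} strictly overlaps {[2,7)}; it does not strictly
// overlap {[1,2)}, which it wholly contains.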
// Determines if an interval set is contained within the set. Requires intervals
// be normalized.
template <typename T, class Store>
bool IntervalSet<T, Store>::Contains(const IntervalSet<T, Store> &iset) const {
if (iset.Count() > Count()) return false;
auto it1 = intervals_.begin();
auto it2 = iset.intervals_.begin();
while (it1 != intervals_.end() && it2 != iset.intervals_.end()) {
if ((it1->end) <= (it2->begin)) { // No overlap; it1 first.
++it1;
} else if ((it2->begin) < (it1->begin) ||
(it2->end) > (it1->end)) { // No C.
return false;
} else if (it2->end == it1->end) {
++it1;
++it2;
} else {
++it2;
}
}
return it2 == iset.intervals_.end();
}
template <typename T, class Store>
std::ostream &operator<<(std::ostream &strm, const IntervalSet<T, Store> &s) {
strm << "{";
for (T i = 0; i < s.Size(); ++i) {
if (i > 0) {
strm << ",";
}
const auto &interval = s.Intervals()[i];
strm << "[" << interval.begin << "," << interval.end << ")";
}
strm << "}";
return strm;
}
} // namespace fst
#endif // FST_INTERVAL_SET_H_
| 0 |
coqui_public_repos/STT/native_client/kenlm/lm | coqui_public_repos/STT/native_client/kenlm/lm/common/CMakeLists.txt | # This CMake file was created by Lane Schwartz <dowobeha@gmail.com>
# Explicitly list the source files for this subdirectory
#
# If you add any source files to this subdirectory
# that should be included in the kenlm library,
# (this excludes any unit test files)
# you should add them to the following list:
#
# In order to set correct paths to these files
# in case this variable is referenced by CMake files in the parent directory,
# we prefix all files with ${CMAKE_CURRENT_SOURCE_DIR}.
#
set(KENLM_LM_COMMON_SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/model_buffer.cc
${CMAKE_CURRENT_SOURCE_DIR}/print.cc
${CMAKE_CURRENT_SOURCE_DIR}/renumber.cc
${CMAKE_CURRENT_SOURCE_DIR}/size_option.cc
PARENT_SCOPE)
if(BUILD_TESTING)
KenLMAddTest(TEST model_buffer_test
LIBRARIES kenlm
TEST_ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test_data)
endif()
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/examples-vad_transcriber-py36.yml | build:
template_file: examples-base.tyml
docker_image: "python:3.6"
dependencies:
- "linux-amd64-cpu-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/examples/vad_transcriber/test.sh 3.6.0:m"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech examples: VAD transcriber Py3.6"
description: "DeepSpeech examples: VAD transcriber streaming Python 3.6"
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/lm/test_nounk.arpa |
\data\
ngram 1=36
ngram 2=45
ngram 3=10
ngram 4=6
ngram 5=4
\1-grams:
-1.383514 , -0.30103
-1.139057 . -0.845098
-1.029493 </s>
-99 <s> -0.4149733
-1.285941 a -0.69897
-1.687872 also -0.30103
-1.687872 beyond -0.30103
-1.687872 biarritz -0.30103
-1.687872 call -0.30103
-1.687872 concerns -0.30103
-1.687872 consider -0.30103
-1.687872 considering -0.30103
-1.687872 for -0.30103
-1.509559 higher -0.30103
-1.687872 however -0.30103
-1.687872 i -0.30103
-1.687872 immediate -0.30103
-1.687872 in -0.30103
-1.687872 is -0.30103
-1.285941 little -0.69897
-1.383514 loin -0.30103
-1.687872 look -0.30103
-1.285941 looking -0.4771212
-1.206319 more -0.544068
-1.509559 on -0.4771212
-1.509559 screening -0.4771212
-1.687872 small -0.30103
-1.687872 the -0.30103
-1.687872 to -0.30103
-1.687872 watch -0.30103
-1.687872 watching -0.30103
-1.687872 what -0.30103
-1.687872 would -0.30103
-3.141592 foo
-2.718281 bar 3.0
-6.535897 baz -0.0
\2-grams:
-0.6925742 , .
-0.7522095 , however
-0.7522095 , is
-0.0602359 . </s>
-0.4846522 <s> looking -0.4771214
-1.051485 <s> screening
-1.07153 <s> the
-1.07153 <s> watching
-1.07153 <s> what
-0.09132547 a little -0.69897
-0.2922095 also call
-0.2922095 beyond immediate
-0.2705918 biarritz .
-0.2922095 call for
-0.2922095 concerns in
-0.2922095 consider watch
-0.2922095 considering consider
-0.2834328 for ,
-0.5511513 higher more
-0.5845945 higher small
-0.2834328 however ,
-0.2922095 i would
-0.2922095 immediate concerns
-0.2922095 in biarritz
-0.2922095 is to
-0.09021038 little more -0.1998621
-0.7273645 loin ,
-0.6925742 loin .
-0.6708385 loin </s>
-0.2922095 look beyond
-0.4638903 looking higher
-0.4638903 looking on -0.4771212
-0.5136299 more . -0.4771212
-0.3561665 more loin
-0.1649931 on a -0.4771213
-0.1649931 screening a -0.4771213
-0.2705918 small .
-0.287799 the screening
-0.2922095 to look
-0.2622373 watch </s>
-0.2922095 watching considering
-0.2922095 what i
-0.2922095 would also
-2 also would -6
-6 foo bar
\3-grams:
-0.01916512 more . </s>
-0.0283603 on a little -0.4771212
-0.0283603 screening a little -0.4771212
-0.01660496 a little more -0.09409451
-0.3488368 <s> looking higher
-0.3488368 <s> looking on -0.4771212
-0.1892331 little more loin
-0.04835128 looking on a -0.4771212
-3 also would consider -7
-7 to look a
\4-grams:
-0.009249173 looking on a little -0.4771212
-0.005464747 on a little more -0.4771212
-0.005464747 screening a little more
-0.1453306 a little more loin
-0.01552657 <s> looking on a -0.4771212
-4 also would consider higher -8
\5-grams:
-0.003061223 <s> looking on a little
-0.001813953 looking on a little more
-0.0432557 on a little more loin
-5 also would consider higher looking
\end\
| 0 |
coqui_public_repos/TTS/tests | coqui_public_repos/TTS/tests/tts_tests/test_losses.py | import unittest
import torch as T
from TTS.tts.layers.losses import BCELossMasked, L1LossMasked, MSELossMasked, SSIMLoss
from TTS.tts.utils.helpers import sequence_mask
class L1LossMaskedTests(unittest.TestCase):
def test_in_out(self): # pylint: disable=no-self-use
# test input == target
layer = L1LossMasked(seq_len_norm=False)
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.ones(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 0.0
# test input != target
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
# test if padded values of input makes any difference
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.arange(5, 9)).long()
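        # sequence_mask() is 1 on valid frames and 0 on padding, so this mask is
        # -100 at padded positions only; a correctly masked loss must ignore it.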
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
dummy_input = T.rand(4, 8, 128).float()
dummy_target = dummy_input.detach()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0, "0 vs {}".format(output.item())
# seq_len_norm = True
# test input == target
layer = L1LossMasked(seq_len_norm=True)
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.ones(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 0.0
# test input != target
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
# test if padded values of input makes any difference
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item())
dummy_input = T.rand(4, 8, 128).float()
dummy_target = dummy_input.detach()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0, "0 vs {}".format(output.item())
class MSELossMaskedTests(unittest.TestCase):
def test_in_out(self): # pylint: disable=no-self-use
# test input == target
layer = MSELossMasked(seq_len_norm=False)
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.ones(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 0.0
# test input != target
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
# test if padded values of input makes any difference
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
dummy_input = T.rand(4, 8, 128).float()
dummy_target = dummy_input.detach()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0, "0 vs {}".format(output.item())
# seq_len_norm = True
# test input == target
layer = MSELossMasked(seq_len_norm=True)
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.ones(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 0.0
# test input != target
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
# test if padded values of input makes any difference
dummy_input = T.ones(4, 8, 128).float()
dummy_target = T.zeros(4, 8, 128).float()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item())
dummy_input = T.rand(4, 8, 128).float()
dummy_target = dummy_input.detach()
dummy_length = (T.arange(5, 9)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0, "0 vs {}".format(output.item())
class SSIMLossTests(unittest.TestCase):
def test_in_out(self): # pylint: disable=no-self-use
# test input == target
layer = SSIMLoss()
dummy_input = T.ones(4, 57, 128).float()
dummy_target = T.ones(4, 57, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 0.0
# test input != target
dummy_input = T.arange(0, 4 * 57 * 128)
dummy_input = dummy_input.reshape(4, 57, 128).float()
dummy_target = T.arange(-4 * 57 * 128, 0)
dummy_target = dummy_target.reshape(4, 57, 128).float()
dummy_target = -dummy_target
dummy_length = (T.ones(4) * 58).long()
output = layer(dummy_input, dummy_target, dummy_length)
        assert output.item() >= 1.0, ">= 1.0 vs {}".format(output.item())
# test if padded values of input makes any difference
dummy_input = T.ones(4, 57, 128).float()
dummy_target = T.zeros(4, 57, 128).float()
dummy_length = (T.arange(54, 58)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0.0
dummy_input = T.rand(4, 57, 128).float()
dummy_target = dummy_input.detach()
dummy_length = (T.arange(54, 58)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0, "0 vs {}".format(output.item())
# seq_len_norm = True
# test input == target
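        # Note: the seq_len_norm checks below construct L1LossMasked, so the
        # expected values mirror the masked-L1 tests rather than SSIM.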
layer = L1LossMasked(seq_len_norm=True)
dummy_input = T.ones(4, 57, 128).float()
dummy_target = T.ones(4, 57, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 0.0
# test input != target
dummy_input = T.ones(4, 57, 128).float()
dummy_target = T.zeros(4, 57, 128).float()
dummy_length = (T.ones(4) * 8).long()
output = layer(dummy_input, dummy_target, dummy_length)
assert output.item() == 1.0, "1.0 vs {}".format(output.item())
# test if padded values of input makes any difference
dummy_input = T.ones(4, 57, 128).float()
dummy_target = T.zeros(4, 57, 128).float()
dummy_length = (T.arange(54, 58)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item())
dummy_input = T.rand(4, 57, 128).float()
dummy_target = dummy_input.detach()
dummy_length = (T.arange(54, 58)).long()
mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2)
output = layer(dummy_input + mask, dummy_target, dummy_length)
assert output.item() == 0, "0 vs {}".format(output.item())
class BCELossTest(unittest.TestCase):
def test_in_out(self): # pylint: disable=no-self-use
layer = BCELossMasked(pos_weight=5.0)
length = T.tensor([95])
target = (
1.0 - sequence_mask(length - 1, 100).float()
) # [0, 0, .... 1, 1] where the first 1 is the last mel frame
true_x = target * 200 - 100 # creates logits of [-100, -100, ... 100, 100] corresponding to target
zero_x = T.zeros(target.shape) - 100.0 # simulate logits if it never stops decoding
early_x = -200.0 * sequence_mask(length - 3, 100).float() + 100.0 # simulate logits on early stopping
late_x = -200.0 * sequence_mask(length + 1, 100).float() + 100.0 # simulate logits on late stopping
loss = layer(true_x, target, length)
self.assertEqual(loss.item(), 0.0)
loss = layer(early_x, target, length)
self.assertAlmostEqual(loss.item(), 2.1053, places=4)
loss = layer(late_x, target, length)
self.assertAlmostEqual(loss.item(), 5.2632, places=4)
loss = layer(zero_x, target, length)
self.assertAlmostEqual(loss.item(), 5.2632, places=4)
# pos_weight should be < 1 to penalize early stopping
layer = BCELossMasked(pos_weight=0.2)
loss = layer(true_x, target, length)
self.assertEqual(loss.item(), 0.0)
# when pos_weight < 1 overweight the early stopping loss
loss_early = layer(early_x, target, length)
loss_late = layer(late_x, target, length)
self.assertGreater(loss_early.item(), loss_late.item())
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/test/CMakeLists.txt | add_executable(fst_test
fst_test.cc
fst_test.h
)
target_link_libraries(fst_test fst ${CMAKE_DL_LIBS})
set_target_properties(fst_test PROPERTIES FOLDER test)
add_test(NAME fst_test-test COMMAND fst_test)
add_executable(weight_test
weight_test.cc
weight-tester.h
)
target_link_libraries(weight_test fst ${CMAKE_DL_LIBS})
set_target_properties(weight_test PROPERTIES FOLDER test)
add_test(NAME weight_test-test COMMAND weight_test)
add_executable(algo_test_log algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_log fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_log
PRIVATE TEST_LOG=1)
set_target_properties(algo_test_log PROPERTIES FOLDER test)
add_test(NAME algo_test_log-test COMMAND algo_test_log)
add_executable(algo_test_tropical algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_tropical fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_tropical
PRIVATE TEST_TROPICAL=1)
set_target_properties(algo_test_tropical PROPERTIES FOLDER test)
add_test(NAME algo_test_tropical-test COMMAND algo_test_tropical)
add_executable(algo_test_minmax algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_minmax fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_minmax
PRIVATE TEST_MINMAX=1)
set_target_properties(algo_test_minmax PROPERTIES FOLDER test)
add_test(NAME algo_test_minmax-test COMMAND algo_test_minmax)
add_executable(algo_test_lexicographic algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_lexicographic fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_lexicographic
PRIVATE TEST_LEXICOGRAPHIC=1)
set_target_properties(algo_test_lexicographic PROPERTIES FOLDER test)
add_test(NAME algo_test_lexicographic-test COMMAND algo_test_lexicographic)
add_executable(algo_test_power algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_power fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_power
PRIVATE TEST_POWER=1)
set_target_properties(algo_test_power PROPERTIES FOLDER test)
add_test(NAME algo_test_power-test COMMAND algo_test_power)
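# The algo_test_* targets above differ only in which TEST_* macro they define;
# a hypothetical refactor could generate them in a loop, e.g.:
#   foreach(ring log tropical minmax lexicographic power)
#     string(TOUPPER "${ring}" RING)
#     add_executable(algo_test_${ring} algo_test.cc algo_test.h rand-fst.h)
#     target_link_libraries(algo_test_${ring} fst ${CMAKE_DL_LIBS})
#     target_compile_definitions(algo_test_${ring} PRIVATE TEST_${RING}=1)
#     set_target_properties(algo_test_${ring} PROPERTIES FOLDER test)
#     add_test(NAME algo_test_${ring}-test COMMAND algo_test_${ring})
#   endforeach()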
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/compress/CMakeLists.txt | file(GLOB HEADER_FILES ../../include/fst/extensions/compress/*.h)
message(STATUS "${HEADER_FILES}")
add_library(fstcompressscript
compress-script.cc
${HEADER_FILES}
)
target_link_libraries(fstcompressscript
fstscript
fst
${ZLIBS}
)
set_target_properties(fstcompressscript PROPERTIES
SOVERSION "10"
)
install(TARGETS fstcompressscript
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
RUNTIME DESTINATION lib
)
if(HAVE_BIN)
add_executable(fstcompress
fstcompress.cc)
target_link_libraries(fstcompress
fstcompressscript
fstscript
fst
${ZLIBS}
${CMAKE_DL_LIBS}
)
add_executable(fstrandmod
fstrandmod.cc
)
target_link_libraries(fstrandmod
fstcompressscript
fstscript
fst
${ZLIBS}
${CMAKE_DL_LIBS}
)
install(TARGETS fstcompress fstrandmod
LIBRARY DESTINATION bin
ARCHIVE DESTINATION bin
RUNTIME DESTINATION bin
)
endif(HAVE_BIN)
| 0 |
coqui_public_repos/TTS/tests | coqui_public_repos/TTS/tests/inputs/test_config.json | {
"audio":{
"audio_processor": "audio",
"num_mels": 80,
"fft_size": 1024,
"sample_rate": 22050,
"frame_length_ms": null,
"frame_shift_ms": null,
"hop_length": 256,
"win_length": 1024,
"preemphasis": 0.97,
"min_level_db": -100,
"ref_level_db": 20,
"power": 1.5,
"griffin_lim_iters": 30,
"signal_norm": true,
"symmetric_norm": true,
"clip_norm": true,
"max_norm": 4,
"mel_fmin": 0,
"mel_fmax": 8000,
"do_trim_silence": false,
"spec_gain": 20
},
"characters":{
"pad": "_",
"eos": "~",
"bos": "^",
"characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;? ",
"punctuations":"!'(),-.:;? ",
"phonemes":"iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡqɢʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧɚ˞ɫʲ"
},
"hidden_size": 128,
"embedding_size": 256,
"text_cleaner": "english_cleaners",
"epochs": 2000,
"lr": 0.003,
"lr_patience": 5,
"lr_decay": 0.5,
"batch_size": 2,
"r": 5,
"mk": 1.0,
"num_loader_workers": 0,
"memory_size": 5,
"save_step": 200,
"data_path": "tests/data/ljspeech/",
"output_path": "result",
"min_seq_len": 0,
"max_seq_len": 300,
"log_dir": "tests/outputs/",
"use_speaker_embedding": false,
"use_gst": true,
"gst": {
"gst_style_input": null,
"gst_use_speaker_embedding": true,
"gst_embedding_dim": 512,
"gst_num_heads": 4,
"gst_num_style_tokens": 10
}
}
| 0 |
coqui_public_repos/STT/native_client/kenlm | coqui_public_repos/STT/native_client/kenlm/util/read_compressed.cc | #include "read_compressed.hh"
#include "file.hh"
#include "have.hh"
#include "scoped.hh"
#include <algorithm>
#include <iostream>
#include <cassert>
#include <climits>
#include <cstdlib>
#include <cstring>
#ifdef HAVE_ZLIB
#include <zlib.h>
#endif
#ifdef HAVE_BZLIB
#include <bzlib.h>
#endif
#ifdef HAVE_XZLIB
#include <lzma.h>
#endif
namespace util {
CompressedException::CompressedException() throw() {}
CompressedException::~CompressedException() throw() {}
GZException::GZException() throw() {}
GZException::~GZException() throw() {}
BZException::BZException() throw() {}
BZException::~BZException() throw() {}
XZException::XZException() throw() {}
XZException::~XZException() throw() {}
void ReadBase::ReplaceThis(ReadBase *with, ReadCompressed &thunk) {
thunk.internal_.reset(with);
}
ReadBase *ReadBase::Current(ReadCompressed &thunk) { return thunk.internal_.get(); }
uint64_t &ReadBase::ReadCount(ReadCompressed &thunk) {
return thunk.raw_amount_;
}
namespace {
ReadBase *ReadFactory(int fd, uint64_t &raw_amount, const void *already_data, std::size_t already_size, bool require_compressed);
// Completed file that other classes can thunk to.
class Complete : public ReadBase {
public:
std::size_t Read(void *, std::size_t, ReadCompressed &) {
return 0;
}
};
class Uncompressed : public ReadBase {
public:
explicit Uncompressed(int fd) : fd_(fd) {}
std::size_t Read(void *to, std::size_t amount, ReadCompressed &thunk) {
std::size_t got = PartialRead(fd_.get(), to, amount);
ReadCount(thunk) += got;
return got;
}
private:
scoped_fd fd_;
};
class UncompressedWithHeader : public ReadBase {
public:
UncompressedWithHeader(int fd, const void *already_data, std::size_t already_size) : fd_(fd) {
assert(already_size);
buf_.reset(malloc(already_size));
if (!buf_.get()) throw std::bad_alloc();
memcpy(buf_.get(), already_data, already_size);
remain_ = static_cast<uint8_t*>(buf_.get());
end_ = remain_ + already_size;
}
std::size_t Read(void *to, std::size_t amount, ReadCompressed &thunk) {
assert(buf_.get());
assert(remain_ != end_);
std::size_t sending = std::min<std::size_t>(amount, end_ - remain_);
memcpy(to, remain_, sending);
remain_ += sending;
if (remain_ == end_) {
ReplaceThis(new Uncompressed(fd_.release()), thunk);
}
return sending;
}
private:
scoped_malloc buf_;
uint8_t *remain_;
uint8_t *end_;
scoped_fd fd_;
};
static const std::size_t kInputBuffer = 16384;
template <class Compression> class StreamCompressed : public ReadBase {
public:
StreamCompressed(int fd, const void *already_data, std::size_t already_size)
: file_(fd),
in_buffer_(MallocOrThrow(kInputBuffer)),
back_(memcpy(in_buffer_.get(), already_data, already_size), already_size) {}
std::size_t Read(void *to, std::size_t amount, ReadCompressed &thunk) {
if (amount == 0) return 0;
back_.SetOutput(to, amount);
do {
if (!back_.Stream().avail_in) ReadInput(thunk);
if (!back_.Process()) {
// reached end, at least for the compressed portion.
std::size_t ret = static_cast<const uint8_t *>(static_cast<void*>(back_.Stream().next_out)) - static_cast<const uint8_t*>(to);
ReplaceThis(ReadFactory(file_.release(), ReadCount(thunk), back_.Stream().next_in, back_.Stream().avail_in, true), thunk);
if (ret) return ret;
// We did not read anything this round, so clients might think EOF. Transfer responsibility to the next reader.
return Current(thunk)->Read(to, amount, thunk);
}
} while (back_.Stream().next_out == to);
return static_cast<const uint8_t*>(static_cast<void*>(back_.Stream().next_out)) - static_cast<const uint8_t*>(to);
}
private:
void ReadInput(ReadCompressed &thunk) {
assert(!back_.Stream().avail_in);
std::size_t got = ReadOrEOF(file_.get(), in_buffer_.get(), kInputBuffer);
back_.SetInput(in_buffer_.get(), got);
ReadCount(thunk) += got;
}
scoped_fd file_;
scoped_malloc in_buffer_;
Compression back_;
};
#ifdef HAVE_ZLIB
class GZip {
public:
GZip(const void *base, std::size_t amount) {
SetInput(base, amount);
stream_.zalloc = Z_NULL;
stream_.zfree = Z_NULL;
stream_.opaque = Z_NULL;
stream_.msg = NULL;
// 32 for zlib and gzip decoding with automatic header detection.
// 15 for maximum window size.
UTIL_THROW_IF(Z_OK != inflateInit2(&stream_, 32 + 15), GZException, "Failed to initialize zlib.");
}
~GZip() {
if (Z_OK != inflateEnd(&stream_)) {
std::cerr << "zlib could not close properly." << std::endl;
abort();
}
}
void SetOutput(void *to, std::size_t amount) {
stream_.next_out = static_cast<Bytef*>(to);
stream_.avail_out = std::min<std::size_t>(std::numeric_limits<uInt>::max(), amount);
}
void SetInput(const void *base, std::size_t amount) {
assert(amount < static_cast<std::size_t>(std::numeric_limits<uInt>::max()));
stream_.next_in = const_cast<Bytef*>(static_cast<const Bytef*>(base));
stream_.avail_in = amount;
}
const z_stream &Stream() const { return stream_; }
bool Process() {
int result = inflate(&stream_, 0);
switch (result) {
case Z_OK:
return true;
case Z_STREAM_END:
return false;
case Z_ERRNO:
UTIL_THROW(ErrnoException, "zlib error");
default:
UTIL_THROW(GZException, "zlib encountered " << (stream_.msg ? stream_.msg : "an error ") << " code " << result);
}
}
private:
z_stream stream_;
};
#endif // HAVE_ZLIB
#ifdef HAVE_BZLIB
class BZip {
public:
BZip(const void *base, std::size_t amount) {
memset(&stream_, 0, sizeof(stream_));
SetInput(base, amount);
HandleError(BZ2_bzDecompressInit(&stream_, 0, 0));
}
~BZip() {
try {
HandleError(BZ2_bzDecompressEnd(&stream_));
} catch (const std::exception &e) {
std::cerr << e.what() << std::endl;
abort();
}
}
bool Process() {
int ret = BZ2_bzDecompress(&stream_);
if (ret == BZ_STREAM_END) return false;
HandleError(ret);
return true;
}
void SetOutput(void *base, std::size_t amount) {
stream_.next_out = static_cast<char*>(base);
stream_.avail_out = std::min<std::size_t>(std::numeric_limits<unsigned int>::max(), amount);
}
void SetInput(const void *base, std::size_t amount) {
stream_.next_in = const_cast<char*>(static_cast<const char*>(base));
stream_.avail_in = amount;
}
const bz_stream &Stream() const { return stream_; }
private:
void HandleError(int value) {
switch(value) {
case BZ_OK:
return;
case BZ_CONFIG_ERROR:
UTIL_THROW(BZException, "bzip2 seems to be miscompiled.");
case BZ_PARAM_ERROR:
UTIL_THROW(BZException, "bzip2 Parameter error");
case BZ_DATA_ERROR:
UTIL_THROW(BZException, "bzip2 detected a corrupt file");
case BZ_DATA_ERROR_MAGIC:
UTIL_THROW(BZException, "bzip2 detected bad magic bytes. Perhaps this was not a bzip2 file after all?");
case BZ_MEM_ERROR:
throw std::bad_alloc();
default:
UTIL_THROW(BZException, "Unknown bzip2 error code " << value);
}
}
bz_stream stream_;
};
#endif // HAVE_BZLIB
#ifdef HAVE_XZLIB
class XZip {
public:
XZip(const void *base, std::size_t amount)
: stream_(), action_(LZMA_RUN) {
memset(&stream_, 0, sizeof(stream_));
SetInput(base, amount);
HandleError(lzma_stream_decoder(&stream_, UINT64_MAX, 0));
}
~XZip() {
lzma_end(&stream_);
}
void SetOutput(void *base, std::size_t amount) {
stream_.next_out = static_cast<uint8_t*>(base);
stream_.avail_out = amount;
}
void SetInput(const void *base, std::size_t amount) {
stream_.next_in = static_cast<const uint8_t*>(base);
stream_.avail_in = amount;
if (!amount) action_ = LZMA_FINISH;
}
const lzma_stream &Stream() const { return stream_; }
bool Process() {
lzma_ret status = lzma_code(&stream_, action_);
if (status == LZMA_STREAM_END) return false;
HandleError(status);
return true;
}
private:
void HandleError(lzma_ret value) {
switch (value) {
case LZMA_OK:
return;
case LZMA_MEM_ERROR:
throw std::bad_alloc();
case LZMA_FORMAT_ERROR:
UTIL_THROW(XZException, "xzlib says file format not recognized");
case LZMA_OPTIONS_ERROR:
UTIL_THROW(XZException, "xzlib says unsupported compression options");
case LZMA_DATA_ERROR:
UTIL_THROW(XZException, "xzlib says this file is corrupt");
case LZMA_BUF_ERROR:
UTIL_THROW(XZException, "xzlib says unexpected end of input");
default:
UTIL_THROW(XZException, "unrecognized xzlib error " << value);
}
}
lzma_stream stream_;
lzma_action action_;
};
#endif // HAVE_XZLIB
class IStreamReader : public ReadBase {
public:
explicit IStreamReader(std::istream &stream) : stream_(stream) {}
std::size_t Read(void *to, std::size_t amount, ReadCompressed &thunk) {
if (!stream_.read(static_cast<char*>(to), amount)) {
UTIL_THROW_IF(!stream_.eof(), ErrnoException, "istream error");
amount = stream_.gcount();
}
ReadCount(thunk) += amount;
return amount;
}
private:
std::istream &stream_;
};
enum MagicResult {
UTIL_UNKNOWN, UTIL_GZIP, UTIL_BZIP, UTIL_XZIP
};
MagicResult DetectMagic(const void *from_void, std::size_t length) {
const uint8_t *header = static_cast<const uint8_t*>(from_void);
if (length >= 2 && header[0] == 0x1f && header[1] == 0x8b) {
return UTIL_GZIP;
}
const uint8_t kBZMagic[3] = {'B', 'Z', 'h'};
if (length >= sizeof(kBZMagic) && !memcmp(header, kBZMagic, sizeof(kBZMagic))) {
return UTIL_BZIP;
}
const uint8_t kXZMagic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
if (length >= sizeof(kXZMagic) && !memcmp(header, kXZMagic, sizeof(kXZMagic))) {
return UTIL_XZIP;
}
return UTIL_UNKNOWN;
}
ReadBase *ReadFactory(int fd, uint64_t &raw_amount, const void *already_data, const std::size_t already_size, bool require_compressed) {
scoped_fd hold(fd);
std::string header(reinterpret_cast<const char*>(already_data), already_size);
if (header.size() < ReadCompressed::kMagicSize) {
std::size_t original = header.size();
header.resize(ReadCompressed::kMagicSize);
std::size_t got = ReadOrEOF(fd, &header[original], ReadCompressed::kMagicSize - original);
raw_amount += got;
header.resize(original + got);
}
if (header.empty()) {
return new Complete();
}
switch (DetectMagic(&header[0], header.size())) {
case UTIL_GZIP:
#ifdef HAVE_ZLIB
return new StreamCompressed<GZip>(hold.release(), header.data(), header.size());
#else
UTIL_THROW(CompressedException, "This looks like a gzip file but gzip support was not compiled in.");
#endif
case UTIL_BZIP:
#ifdef HAVE_BZLIB
return new StreamCompressed<BZip>(hold.release(), &header[0], header.size());
#else
UTIL_THROW(CompressedException, "This looks like a bzip file (it begins with BZh), but bzip support was not compiled in.");
#endif
case UTIL_XZIP:
#ifdef HAVE_XZLIB
return new StreamCompressed<XZip>(hold.release(), header.data(), header.size());
#else
UTIL_THROW(CompressedException, "This looks like an xz file, but xz support was not compiled in.");
#endif
default:
      UTIL_THROW_IF(require_compressed, CompressedException, "Uncompressed data detected after a compressed file. This could be supported but usually indicates an error.");
return new UncompressedWithHeader(hold.release(), header.data(), header.size());
}
}
} // namespace
bool ReadCompressed::DetectCompressedMagic(const void *from_void) {
return DetectMagic(from_void, kMagicSize) != UTIL_UNKNOWN;
}
ReadCompressed::ReadCompressed(int fd) {
Reset(fd);
}
ReadCompressed::ReadCompressed(std::istream &in) {
Reset(in);
}
ReadCompressed::ReadCompressed() {}
void ReadCompressed::Reset(int fd) {
raw_amount_ = 0;
internal_.reset();
internal_.reset(ReadFactory(fd, raw_amount_, NULL, 0, false));
}
void ReadCompressed::Reset(std::istream &in) {
internal_.reset();
internal_.reset(new IStreamReader(in));
}
std::size_t ReadCompressed::Read(void *to, std::size_t amount) {
return internal_->Read(to, amount, *this);
}
std::size_t ReadCompressed::ReadOrEOF(void *const to_in, std::size_t amount) {
uint8_t *to = reinterpret_cast<uint8_t*>(to_in);
while (amount) {
std::size_t got = Read(to, amount);
if (!got) break;
to += got;
amount -= got;
}
return to - reinterpret_cast<uint8_t*>(to_in);
}
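// A minimal usage sketch of the class defined above (illustrative only; "fd"
// is a hypothetical file descriptor opened by the caller):
//
//   ReadCompressed reader(fd);  // Compression is detected via magic bytes.
//   char buf[4096];
//   while (std::size_t got = reader.Read(buf, sizeof(buf))) {
//     /* consume buf[0..got) */
//   }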
} // namespace util
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/special/fstspecial.cc | // Work-around to correctly build (e.g. distclean) with autotools
// using files in another directory that are also built there.
// See https://stackoverflow.com/questions/30379837.
#include "fstconvert-main.cc" // NOLINT
#include "fstconvert.cc" // NOLINT
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/depcomp | #! /bin/sh
# depcomp - compile a program generating dependencies as side-effects
scriptversion=2013-05-30.07; # UTC
# Copyright (C) 1999-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
case $1 in
'')
echo "$0: No command. Try '$0 --help' for more information." 1>&2
exit 1;
;;
-h | --h*)
cat <<\EOF
Usage: depcomp [--help] [--version] PROGRAM [ARGS]
Run PROGRAM ARGS to compile a file, generating dependencies
as side-effects.
Environment variables:
depmode Dependency tracking mode.
  source      Source file read by 'PROGRAM ARGS'.
  object      Object file output by 'PROGRAM ARGS'.
DEPDIR directory where to store dependencies.
depfile Dependency file to output.
tmpdepfile Temporary file to use when outputting dependencies.
libtool Whether libtool is used (yes/no).
Report bugs to <bug-automake@gnu.org>.
EOF
exit $?
;;
-v | --v*)
echo "depcomp $scriptversion"
exit $?
;;
esac
# Get the directory component of the given path, and save it in the
# global variable '$dir'.  Note that this directory component will
# be either empty or end with a '/' character.  This is deliberate.
set_dir_from ()
{
case $1 in
*/*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;;
*) dir=;;
esac
}
# Get the suffix-stripped basename of the given path, and save it in the
# global variable '$base'.
set_base_from ()
{
base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'`
}
# If no dependency file was actually created by the compiler invocation,
# we still have to create a dummy depfile, to avoid errors with the
# Makefile "include basename.Plo" scheme.
make_dummy_depfile ()
{
echo "#dummy" > "$depfile"
}
# Factor out some common post-processing of the generated depfile.
# Requires the auxiliary global variable '$tmpdepfile' to be set.
aix_post_process_depfile ()
{
# If the compiler actually managed to produce a dependency file,
# post-process it.
if test -f "$tmpdepfile"; then
# Each line is of the form 'foo.o: dependency.h'.
# Do two passes, one to just change these to
# $object: dependency.h
# and one to simply output
# dependency.h:
# which is needed to avoid the deleted-header problem.
{ sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile"
sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile"
} > "$depfile"
rm -f "$tmpdepfile"
else
make_dummy_depfile
fi
}
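# For illustration (a sketch, assuming object=sub/foo.o): a tmpdepfile reading
#   foo.o: bar.h
#   foo.o: baz.h
# is post-processed into a depfile reading
#   sub/foo.o: bar.h
#   sub/foo.o: baz.h
#   bar.h:
#   baz.h:
# so that deleting a header later cannot break the "include basename.Plo"
# scheme described above.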
# A tabulation character.
tab=' '
# A newline character.
nl='
'
# Character ranges might be problematic outside the C locale.
# These definitions help.
upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ
lower=abcdefghijklmnopqrstuvwxyz
digits=0123456789
alpha=${upper}${lower}
if test -z "$depmode" || test -z "$source" || test -z "$object"; then
echo "depcomp: Variables source, object and depmode must be set" 1>&2
exit 1
fi
# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
depfile=${depfile-`echo "$object" |
sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
rm -f "$tmpdepfile"
# Avoid interferences from the environment.
gccflag= dashmflag=
# Some modes work just like other modes, but use different flags. We
# parameterize here, but still list the modes in the big case below,
# to make depend.m4 easier to write. Note that we *cannot* use a case
# here, because this file can only contain one case statement.
if test "$depmode" = hp; then
# HP compiler uses -M and no extra arg.
gccflag=-M
depmode=gcc
fi
if test "$depmode" = dashXmstdout; then
# This is just like dashmstdout with a different argument.
dashmflag=-xM
depmode=dashmstdout
fi
cygpath_u="cygpath -u -f -"
if test "$depmode" = msvcmsys; then
# This is just like msvisualcpp but w/o cygpath translation.
# Just convert the backslash-escaped backslashes to single forward
# slashes to satisfy depend.m4
cygpath_u='sed s,\\\\,/,g'
depmode=msvisualcpp
fi
if test "$depmode" = msvc7msys; then
# This is just like msvc7 but w/o cygpath translation.
# Just convert the backslash-escaped backslashes to single forward
# slashes to satisfy depend.m4
cygpath_u='sed s,\\\\,/,g'
depmode=msvc7
fi
if test "$depmode" = xlc; then
# IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information.
gccflag=-qmakedep=gcc,-MF
depmode=gcc
fi
case "$depmode" in
gcc3)
## gcc 3 implements dependency tracking that does exactly what
## we want. Yay! Note: for some reason libtool 1.4 doesn't like
## it if -MD -MP comes after the -MF stuff. Hmm.
## Unfortunately, FreeBSD c89 acceptance of flags depends upon
## the command line argument order; so add the flags where they
## appear in depend2.am. Note that the slowdown incurred here
## affects only configure: in makefiles, %FASTDEP% shortcuts this.
for arg
do
case $arg in
-c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
*) set fnord "$@" "$arg" ;;
esac
shift # fnord
shift # $arg
done
"$@"
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
mv "$tmpdepfile" "$depfile"
;;
gcc)
## Note that this doesn't just cater to obsolete pre-3.x GCC compilers,
## but also to in-use compilers like IBM xlc/xlC and the HP C compiler.
## (see the conditional assignment to $gccflag above).
## There are various ways to get dependency output from gcc. Here's
## why we pick this rather obscure method:
## - Don't want to use -MD because we'd like the dependencies to end
## up in a subdir. Having to rename by hand is ugly.
## (We might end up doing this anyway to support other compilers.)
## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
## -MM, not -M (despite what the docs say). Also, it might not be
## supported by the other compilers which use the 'gcc' depmode.
## - Using -M directly means running the compiler twice (even worse
## than renaming).
if test -z "$gccflag"; then
gccflag=-MD,
fi
"$@" -Wp,"$gccflag$tmpdepfile"
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
echo "$object : \\" > "$depfile"
# The second -e expression handles DOS-style file names with drive
# letters.
sed -e 's/^[^:]*: / /' \
-e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
## This next piece of magic avoids the "deleted header file" problem.
## The problem is that when a header file which appears in a .P file
## is deleted, the dependency causes make to die (because there is
## typically no way to rebuild the header). We avoid this by adding
## dummy dependencies for each header file. Too bad gcc doesn't do
## this for us directly.
## Some versions of gcc put a space before the ':'. On the theory
## that the space means something, we add a space to the output as
## well. hp depmode also adds that space, but also prefixes the VPATH
## to the object. Take care to not repeat it in the output.
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
tr ' ' "$nl" < "$tmpdepfile" \
| sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
| sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
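# For illustration (a sketch, assuming object=foo.o): a tmpdepfile reading
#   foo.o: foo.c foo.h
# yields a depfile reading
#   foo.o : \
#    foo.c foo.h
#   foo.c :
#   foo.h :
# where the trailing 'header :' lines implement the dummy-dependency trick
# described above.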
hp)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
sgi)
if test "$libtool" = yes; then
"$@" "-Wp,-MDupdate,$tmpdepfile"
else
"$@" -MDupdate "$tmpdepfile"
fi
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files
echo "$object : \\" > "$depfile"
# Clip off the initial element (the dependent). Don't try to be
# clever and replace this with sed code, as IRIX sed won't handle
# lines with more than a fixed number of characters (4096 in
# IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
# the IRIX cc adds comments like '#:fec' to the end of the
# dependency line.
tr ' ' "$nl" < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \
| tr "$nl" ' ' >> "$depfile"
echo >> "$depfile"
# The second pass generates a dummy entry for each header file.
tr ' ' "$nl" < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
>> "$depfile"
else
make_dummy_depfile
fi
rm -f "$tmpdepfile"
;;
xlc)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
aix)
# The C for AIX Compiler uses -M and outputs the dependencies
# in a .u file. In older versions, this file always lives in the
# current directory. Also, the AIX compiler puts '$object:' at the
# start of each line; $object doesn't have directory information.
# Version 6 uses the directory in both cases.
set_dir_from "$object"
set_base_from "$object"
if test "$libtool" = yes; then
tmpdepfile1=$dir$base.u
tmpdepfile2=$base.u
tmpdepfile3=$dir.libs/$base.u
"$@" -Wc,-M
else
tmpdepfile1=$dir$base.u
tmpdepfile2=$dir$base.u
tmpdepfile3=$dir$base.u
"$@" -M
fi
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
exit $stat
fi
for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
do
test -f "$tmpdepfile" && break
done
aix_post_process_depfile
;;
tcc)
  # tcc (Tiny C Compiler) understands '-MD -MF file' since version 0.9.26.
  # FIXME: That version was still under development at the moment of writing.
  # Make sure that this statement remains true also for stable, released
  # versions.
# It will wrap lines (doesn't matter whether long or short) with a
# trailing '\', as in:
#
# foo.o : \
# foo.c \
# foo.h \
#
# It will put a trailing '\' even on the last line, and will use leading
# spaces rather than leading tabs (at least since its commit 0394caf7
# "Emit spaces for -MD").
"$@" -MD -MF "$tmpdepfile"
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
# Each non-empty line is of the form 'foo.o : \' or ' dep.h \'.
# We have to change lines of the first kind to '$object: \'.
sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile"
# And for each line of the second kind, we have to emit a 'dep.h:'
# dummy dependency, to avoid the deleted-header problem.
sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile"
rm -f "$tmpdepfile"
;;
## The order of this option in the case statement is important, since the
## shell code in configure will try each of these formats in the order
## listed in this file. A plain '-MD' option would be understood by many
## compilers, so we must ensure this comes after the gcc and icc options.
pgcc)
# Portland's C compiler understands '-MD'.
# Will always output deps to 'file.d' where file is the root name of the
# source file under compilation, even if file resides in a subdirectory.
# The object file name does not affect the name of the '.d' file.
# pgcc 10.2 will output
# foo.o: sub/foo.c sub/foo.h
# and will wrap long lines using '\' :
# foo.o: sub/foo.c ... \
# sub/foo.h ... \
# ...
set_dir_from "$object"
# Use the source, not the object, to determine the base name, since
# that's sadly what pgcc will do too.
set_base_from "$source"
tmpdepfile=$base.d
# For projects that build the same source file twice into different object
# files, the pgcc approach of using the *source* file root name can cause
# problems in parallel builds. Use a locking strategy to avoid stomping on
# the same $tmpdepfile.
lockdir=$base.d-lock
trap "
echo '$0: caught signal, cleaning up...' >&2
rmdir '$lockdir'
exit 1
" 1 2 13 15
numtries=100
i=$numtries
while test $i -gt 0; do
# mkdir is a portable test-and-set.
if mkdir "$lockdir" 2>/dev/null; then
# This process acquired the lock.
"$@" -MD
stat=$?
# Release the lock.
rmdir "$lockdir"
break
else
# If the lock is being held by a different process, wait
# until the winning process is done or we timeout.
while test -d "$lockdir" && test $i -gt 0; do
sleep 1
i=`expr $i - 1`
done
fi
i=`expr $i - 1`
done
trap - 1 2 13 15
if test $i -le 0; then
echo "$0: failed to acquire lock after $numtries attempts" >&2
echo "$0: check lockdir '$lockdir'" >&2
exit 1
fi
if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
# Each line is of the form `foo.o: dependent.h',
# or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
# Do two passes, one to just change these to
# `$object: dependent.h' and one to simply `dependent.h:'.
sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
# Some versions of the HPUX 10.20 sed can't process this invocation
# correctly. Breaking it into two sed invocations is a workaround.
sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \
| sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
hp2)
# The "hp" stanza above does not work with aCC (C++) and HP's ia64
# compilers, which have integrated preprocessors. The correct option
# to use with these is +Maked; it writes dependencies to a file named
# 'foo.d', which lands next to the object file, wherever that
# happens to be.
# Much of this is similar to the tru64 case; see comments there.
set_dir_from "$object"
set_base_from "$object"
if test "$libtool" = yes; then
tmpdepfile1=$dir$base.d
tmpdepfile2=$dir.libs/$base.d
"$@" -Wc,+Maked
else
tmpdepfile1=$dir$base.d
tmpdepfile2=$dir$base.d
"$@" +Maked
fi
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile1" "$tmpdepfile2"
exit $stat
fi
for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
do
test -f "$tmpdepfile" && break
done
if test -f "$tmpdepfile"; then
sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile"
# Add 'dependent.h:' lines.
sed -ne '2,${
s/^ *//
s/ \\*$//
s/$/:/
p
}' "$tmpdepfile" >> "$depfile"
else
make_dummy_depfile
fi
rm -f "$tmpdepfile" "$tmpdepfile2"
;;
tru64)
# The Tru64 compiler uses -MD to generate dependencies as a side
# effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'.
# At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
# dependencies in 'foo.d' instead, so we check for that too.
# Subdirectories are respected.
set_dir_from "$object"
set_base_from "$object"
if test "$libtool" = yes; then
# Libtool generates 2 separate objects for the 2 libraries. These
# two compilations output dependencies in $dir.libs/$base.o.d and
# in $dir$base.o.d. We have to check for both files, because
# one of the two compilations can be disabled. We should prefer
# $dir$base.o.d over $dir.libs/$base.o.d because the latter is
# automatically cleaned when .libs/ is deleted, while ignoring
# the former would cause a distcleancheck panic.
tmpdepfile1=$dir$base.o.d # libtool 1.5
tmpdepfile2=$dir.libs/$base.o.d # Likewise.
tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504
"$@" -Wc,-MD
else
tmpdepfile1=$dir$base.d
tmpdepfile2=$dir$base.d
tmpdepfile3=$dir$base.d
"$@" -MD
fi
stat=$?
if test $stat -ne 0; then
rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
exit $stat
fi
for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
do
test -f "$tmpdepfile" && break
done
# Same post-processing that is required for AIX mode.
aix_post_process_depfile
;;
msvc7)
if test "$libtool" = yes; then
showIncludes=-Wc,-showIncludes
else
showIncludes=-showIncludes
fi
"$@" $showIncludes > "$tmpdepfile"
stat=$?
grep -v '^Note: including file: ' "$tmpdepfile"
if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
echo "$object : \\" > "$depfile"
# The first sed program below extracts the file names and escapes
# backslashes for cygpath. The second sed program outputs the file
# name when reading, but also accumulates all include files in the
# hold buffer in order to output them again at the end. This only
# works with sed implementations that can handle large buffers.
sed < "$tmpdepfile" -n '
/^Note: including file: *\(.*\)/ {
s//\1/
s/\\/\\\\/g
p
}' | $cygpath_u | sort -u | sed -n '
s/ /\\ /g
s/\(.*\)/'"$tab"'\1 \\/p
s/.\(.*\) \\/\1:/
H
$ {
s/.*/'"$tab"'/
G
p
}' >> "$depfile"
echo >> "$depfile" # make sure the fragment doesn't end with a backslash
rm -f "$tmpdepfile"
;;
msvc7msys)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
#nosideeffect)
# This comment above is used by automake to tell side-effect
# dependency tracking mechanisms from slower ones.
dashmstdout)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout, regardless of -o.
"$@" || exit $?
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
shift
done
shift
fi
# Remove '-o $object'.
IFS=" "
for arg
do
case $arg in
-o)
shift
;;
$object)
shift
;;
*)
set fnord "$@" "$arg"
shift # fnord
shift # $arg
;;
esac
done
test -z "$dashmflag" && dashmflag=-M
# Require at least two characters before searching for ':'
# in the target name. This is to cope with DOS-style filenames:
# a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise.
"$@" $dashmflag |
sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile"
rm -f "$depfile"
cat < "$tmpdepfile" > "$depfile"
# Some versions of the HPUX 10.20 sed can't process this sed invocation
# correctly. Breaking it into two sed invocations is a workaround.
tr ' ' "$nl" < "$tmpdepfile" \
| sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
| sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
dashXmstdout)
# This case only exists to satisfy depend.m4. It is never actually
# run, as this mode is specially recognized in the preamble.
exit 1
;;
makedepend)
"$@" || exit $?
# Remove any Libtool call
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
shift
done
shift
fi
# X makedepend
shift
cleared=no eat=no
for arg
do
case $cleared in
no)
set ""; shift
cleared=yes ;;
esac
if test $eat = yes; then
eat=no
continue
fi
case "$arg" in
-D*|-I*)
set fnord "$@" "$arg"; shift ;;
# Strip any option that makedepend may not understand. Remove
# the object too, otherwise makedepend will parse it as a source file.
-arch)
eat=yes ;;
-*|$object)
;;
*)
set fnord "$@" "$arg"; shift ;;
esac
done
obj_suffix=`echo "$object" | sed 's/^.*\././'`
touch "$tmpdepfile"
${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
rm -f "$depfile"
# makedepend may prepend the VPATH from the source file name to the object.
# No need to regex-escape $object, excess matching of '.' is harmless.
sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
# Some versions of the HPUX 10.20 sed can't process the last invocation
# correctly. Breaking it into two sed invocations is a workaround.
sed '1,2d' "$tmpdepfile" \
| tr ' ' "$nl" \
| sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
| sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile" "$tmpdepfile".bak
;;
cpp)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout.
"$@" || exit $?
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
shift
done
shift
fi
# Remove '-o $object'.
IFS=" "
for arg
do
case $arg in
-o)
shift
;;
$object)
shift
;;
*)
set fnord "$@" "$arg"
shift # fnord
shift # $arg
;;
esac
done
"$@" -E \
| sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
-e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
| sed '$ s: \\$::' > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
cat < "$tmpdepfile" >> "$depfile"
sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
msvisualcpp)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout.
"$@" || exit $?
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
shift
done
shift
fi
IFS=" "
for arg
do
case "$arg" in
-o)
shift
;;
$object)
shift
;;
"-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
set fnord "$@"
shift
shift
;;
*)
set fnord "$@" "$arg"
shift
shift
;;
esac
done
"$@" -E 2>/dev/null |
sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile"
echo "$tab" >> "$depfile"
sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
rm -f "$tmpdepfile"
;;
msvcmsys)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
none)
exec "$@"
;;
*)
echo "Unknown depmode $depmode" 1>&2
exit 1
;;
esac
exit 0
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:
| 0 |
coqui_public_repos/STT/native_client/wasm | coqui_public_repos/STT/native_client/wasm/test/Makefile | NPM_TOOL ?= npm
TEST_PROJECT_NAME ?= stt-wasm
TEST_PROJECT_VERSION ?= $(shell cat ../../../training/coqui_stt_training/VERSION | tr -d '\n')
clean:
rm -rf package.json package-lock.json
rm -rf node_modules
package.json: package.json.in
cp package.json.in package.json
install-dependencies: package.json
${NPM_TOOL} install
install-stt-wasm: package.json
${NPM_TOOL} install ${CI_TMP_DIR}/${TEST_PROJECT_NAME}-${TEST_PROJECT_VERSION}.tgz
test: install-dependencies install-stt-wasm
${NPM_TOOL} run test
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/compose-filter.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes for filtering the composition matches, e.g. for correct epsilon
// handling.
#ifndef FST_COMPOSE_FILTER_H_
#define FST_COMPOSE_FILTER_H_
#include <fst/filter-state.h>
#include <fst/fst-decl.h> // For optional argument declarations
#include <fst/fst.h>
#include <fst/matcher.h>
namespace fst {
// Composition filters determine which matches are allowed to proceed. The
// filter's state is represented by the type ComposeFilter::FilterState.
// The basic filters handle correct epsilon matching. Their interface is:
//
// template <class M1, class M2>
// class ComposeFilter {
// public:
// using Matcher1 = ...;
// using Matcher2 = ...;
// using FST1 = typename M1::FST;
// using FST2 = typename M2::FST;
// using FilterState = ...;
//
// using Arc = typename FST1::Arc;
// using StateId = typename Arc::StateId;
// using Weight = typename Arc::Weight;
//
// // Required constructor.
// ComposeFilter(const FST1 &fst1, const FST2 &fst2,
// M1 *matcher1 = nullptr, M2 *matcher2 = nullptr);
//
// // If safe=true, the copy is thread-safe. See Fst<>::Copy()
// // for further doc.
// ComposeFilter(const ComposeFilter<M1, M2> &filter,
// bool safe = false);
//
// // Return start state of filter.
// FilterState Start() const;
//
// // Specifies current composition state.
// void SetState(StateId s1, StateId s2, const FilterState &fs);
//
// // Apply filter at current composition state to these transitions. If an
// // arc label to be matched is kNolabel, then that side does not consume a
// // symbol. Returns the new filter state or, if disallowed,
// // FilterState::NoState(). The filter is permitted to modify its inputs
// // (e.g. for optimization reasons).
// FilterState FilterArc(Arc *arc1, Arc *arc2) const;
// // Apply filter at current composition state to these final weights
// // (cf. superfinal transitions). The filter may modify its inputs
// // (e.g. for optimization reasons).
// void FilterFinal(Weight *w1, Weight *w2) const;
//
// // Return the respective matchers. Ownership stays with filter. These
// // methods allow the filter to access and possibly modify the composition
// // matchers (useful, e.g., with lookahead).
//
// Matcher1 *GetMatcher1();
//
// Matcher2 *GetMatcher2();
//
// // This specifies how the filter affects the composition result properties.
// // It takes as argument the properties that would apply with a trivial
// // composition filter.
// uint64 Properties(uint64 props) const;
// };
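//
// A minimal usage sketch (illustrative, not prescribed by this header; "fst1"
// and "fst2" are hypothetical arc-sorted StdArc FSTs supplied by the caller):
//
//   using M = SortedMatcher<StdFst>;
//   ComposeFstOptions<StdArc, M, AltSequenceComposeFilter<M, M>> opts;
//   ComposeFst<StdArc> composed(fst1, fst2, opts);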
//
// This filter allows only exact matching of symbols from FST1 with those on
// FST2; e.g., there is no special interpretation of epsilons.
template <class M1, class M2 /* = M1 */>
class NullComposeFilter {
public:
using Matcher1 = M1;
using Matcher2 = M2;
using FST1 = typename M1::FST;
using FST2 = typename M2::FST;
using FilterState = TrivialFilterState;
using Arc = typename FST1::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
NullComposeFilter(const FST1 &fst1, const FST2 &fst2,
Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr)
: matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)),
matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()) {}
NullComposeFilter(const NullComposeFilter<M1, M2> &filter, bool safe = false)
: matcher1_(filter.matcher1_->Copy(safe)),
matcher2_(filter.matcher2_->Copy(safe)),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()) {}
FilterState Start() const { return FilterState(true); }
void SetState(StateId, StateId, const FilterState &) {}
FilterState FilterArc(Arc *arc1, Arc *arc2) const {
return (arc1->olabel == kNoLabel || arc2->ilabel == kNoLabel)
? FilterState::NoState()
: FilterState(true);
}
void FilterFinal(Weight *, Weight *) const {}
Matcher1 *GetMatcher1() { return matcher1_.get(); }
Matcher2 *GetMatcher2() { return matcher2_.get(); }
uint64 Properties(uint64 props) const { return props; }
private:
std::unique_ptr<Matcher1> matcher1_;
std::unique_ptr<Matcher2> matcher2_;
const FST1 &fst1_;
const FST2 &fst2_;
};
// This filter allows all epsilon matches, potentially resulting in redundant
// epsilon paths. The use of this filter gives correct results iff one of the
// following conditions holds:
//
// (1) The semiring is idempotent,
// (2) the first FST is output-epsilon free, or
// (3) the second FST is input-epsilon free.
//
// For (1), redundant epsilon paths may be created but won't hurt correctness.
// For (2) and (3), no redundant paths are created.
template <class M1, class M2 /* = M1 */>
class TrivialComposeFilter {
public:
using Matcher1 = M1;
using Matcher2 = M2;
using FST1 = typename M1::FST;
using FST2 = typename M2::FST;
using FilterState = TrivialFilterState;
using Arc = typename FST1::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
TrivialComposeFilter(const FST1 &fst1, const FST2 &fst2,
Matcher1 *matcher1 = nullptr,
Matcher2 *matcher2 = nullptr)
: matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)),
matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()) {}
TrivialComposeFilter(const TrivialComposeFilter<Matcher1, Matcher2> &filter,
bool safe = false)
: matcher1_(filter.matcher1_->Copy(safe)),
matcher2_(filter.matcher2_->Copy(safe)),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()) {}
FilterState Start() const { return FilterState(true); }
void SetState(StateId, StateId, const FilterState &) {}
FilterState FilterArc(Arc *, Arc *) const { return FilterState(true); }
void FilterFinal(Weight *, Weight *) const {}
Matcher1 *GetMatcher1() { return matcher1_.get(); }
Matcher2 *GetMatcher2() { return matcher2_.get(); }
uint64 Properties(uint64 props) const { return props; }
private:
std::unique_ptr<Matcher1> matcher1_;
std::unique_ptr<Matcher2> matcher2_;
const FST1 &fst1_;
const FST2 &fst2_;
};
// This filter requires epsilons on FST1 to be read before epsilons on FST2.
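// Concretely: filter state 0 permits output epsilons on FST1; taking an input
// epsilon on FST2 while FST1 epsilons remain moves the filter to state 1,
// which blocks further FST1 epsilons until a non-epsilon match resets it to
// state 0 (an interpretive note; see FilterArc below).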
template <class M1, class M2 /* = M1 */>
class SequenceComposeFilter {
public:
using Matcher1 = M1;
using Matcher2 = M2;
using FST1 = typename M1::FST;
using FST2 = typename M2::FST;
using FilterState = CharFilterState;
using Arc = typename FST1::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
SequenceComposeFilter(const FST1 &fst1, const FST2 &fst2,
Matcher1 *matcher1 = nullptr,
Matcher2 *matcher2 = nullptr)
: matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)),
matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)),
fst1_(matcher1_->GetFst()),
s1_(kNoStateId),
s2_(kNoStateId),
fs_(kNoStateId) {}
SequenceComposeFilter(const SequenceComposeFilter<Matcher1, Matcher2> &filter,
bool safe = false)
: matcher1_(filter.matcher1_->Copy(safe)),
matcher2_(filter.matcher2_->Copy(safe)),
fst1_(matcher1_->GetFst()),
s1_(kNoStateId),
s2_(kNoStateId),
fs_(kNoStateId) {}
FilterState Start() const { return FilterState(0); }
void SetState(StateId s1, StateId s2, const FilterState &fs) {
if (s1_ == s1 && s2_ == s2 && fs == fs_) return;
s1_ = s1;
s2_ = s2;
fs_ = fs;
const auto na1 = internal::NumArcs(fst1_, s1);
const auto ne1 = internal::NumOutputEpsilons(fst1_, s1);
const bool fin1 = internal::Final(fst1_, s1) != Weight::Zero();
alleps1_ = na1 == ne1 && !fin1;
noeps1_ = ne1 == 0;
}
FilterState FilterArc(Arc *arc1, Arc *arc2) const {
if (arc1->olabel == kNoLabel) {
return alleps1_ ? FilterState::NoState() : noeps1_ ? FilterState(0)
: FilterState(1);
} else if (arc2->ilabel == kNoLabel) {
return fs_ != FilterState(0) ? FilterState::NoState() : FilterState(0);
} else {
return arc1->olabel == 0 ? FilterState::NoState() : FilterState(0);
}
}
void FilterFinal(Weight *, Weight *) const {}
Matcher1 *GetMatcher1() { return matcher1_.get(); }
Matcher2 *GetMatcher2() { return matcher2_.get(); }
uint64 Properties(uint64 props) const { return props; }
private:
std::unique_ptr<Matcher1> matcher1_;
std::unique_ptr<Matcher2> matcher2_;
const FST1 &fst1_;
StateId s1_; // Current fst1_ state.
StateId s2_; // Current fst2_ state.
FilterState fs_; // Current filter state.
bool alleps1_; // Only epsilons (and non-final) leaving s1_?
bool noeps1_; // No epsilons leaving s1_?
};
// This filter requires epsilons on FST2 to be read before epsilons on FST1.
template <class M1, class M2 /* = M1 */>
class AltSequenceComposeFilter {
public:
using Matcher1 = M1;
using Matcher2 = M2;
using FST1 = typename M1::FST;
using FST2 = typename M2::FST;
using FilterState = CharFilterState;
using Arc = typename FST1::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
AltSequenceComposeFilter(const FST1 &fst1, const FST2 &fst2,
Matcher1 *matcher1 = nullptr,
Matcher2 *matcher2 = nullptr)
: matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)),
matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)),
fst2_(matcher2_->GetFst()),
s1_(kNoStateId),
s2_(kNoStateId),
fs_(kNoStateId) {}
AltSequenceComposeFilter(
const AltSequenceComposeFilter<Matcher1, Matcher2> &filter,
bool safe = false)
: matcher1_(filter.matcher1_->Copy(safe)),
matcher2_(filter.matcher2_->Copy(safe)),
fst2_(matcher2_->GetFst()),
s1_(kNoStateId),
s2_(kNoStateId),
fs_(kNoStateId) {}
FilterState Start() const { return FilterState(0); }
void SetState(StateId s1, StateId s2, const FilterState &fs) {
if (s1_ == s1 && s2_ == s2 && fs == fs_) return;
s1_ = s1;
s2_ = s2;
fs_ = fs;
const auto na2 = internal::NumArcs(fst2_, s2);
const auto ne2 = internal::NumInputEpsilons(fst2_, s2);
const bool fin2 = internal::Final(fst2_, s2) != Weight::Zero();
alleps2_ = na2 == ne2 && !fin2;
noeps2_ = ne2 == 0;
}
FilterState FilterArc(Arc *arc1, Arc *arc2) const {
if (arc2->ilabel == kNoLabel) {
return alleps2_ ? FilterState::NoState() : noeps2_ ? FilterState(0)
: FilterState(1);
} else if (arc1->olabel == kNoLabel) {
return fs_ == FilterState(1) ? FilterState::NoState() : FilterState(0);
} else {
return arc1->olabel == 0 ? FilterState::NoState() : FilterState(0);
}
}
void FilterFinal(Weight *, Weight *) const {}
Matcher1 *GetMatcher1() { return matcher1_.get(); }
Matcher2 *GetMatcher2() { return matcher2_.get(); }
uint64 Properties(uint64 props) const { return props; }
private:
std::unique_ptr<Matcher1> matcher1_;
std::unique_ptr<Matcher2> matcher2_;
const FST2 &fst2_;
StateId s1_; // Current fst1_ state.
StateId s2_; // Current fst2_ state.
FilterState fs_; // Current filter state.
bool alleps2_; // Only epsilons (and non-final) leaving s2_?
bool noeps2_; // No epsilons leaving s2_?
};
// This filter requires epsilons on FST1 to be matched with epsilons on FST2
// whenever possible. (Template arg default declared in fst-decl.h.)
template <class M1, class M2 /* = M1 */>
class MatchComposeFilter {
public:
using Matcher1 = M1;
using Matcher2 = M2;
using FST1 = typename M1::FST;
using FST2 = typename M2::FST;
using FilterState = CharFilterState;
using Arc = typename FST1::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
MatchComposeFilter(const FST1 &fst1, const FST2 &fst2,
Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr)
: matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)),
matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()),
s1_(kNoStateId),
s2_(kNoStateId),
fs_(kNoStateId) {}
MatchComposeFilter(const MatchComposeFilter<Matcher1, Matcher2> &filter,
bool safe = false)
: matcher1_(filter.matcher1_->Copy(safe)),
matcher2_(filter.matcher2_->Copy(safe)),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()),
s1_(kNoStateId),
s2_(kNoStateId),
fs_(kNoStateId) {}
FilterState Start() const { return FilterState(0); }
void SetState(StateId s1, StateId s2, const FilterState &fs) {
if (s1_ == s1 && s2_ == s2 && fs == fs_) return;
s1_ = s1;
s2_ = s2;
fs_ = fs;
size_t na1 = internal::NumArcs(fst1_, s1);
size_t ne1 = internal::NumOutputEpsilons(fst1_, s1);
bool f1 = internal::Final(fst1_, s1) != Weight::Zero();
alleps1_ = na1 == ne1 && !f1;
noeps1_ = ne1 == 0;
size_t na2 = internal::NumArcs(fst2_, s2);
size_t ne2 = internal::NumInputEpsilons(fst2_, s2);
bool f2 = internal::Final(fst2_, s2) != Weight::Zero();
alleps2_ = na2 == ne2 && !f2;
noeps2_ = ne2 == 0;
}
FilterState FilterArc(Arc *arc1, Arc *arc2) const {
if (arc2->ilabel == kNoLabel) { // Epsilon in FST1.
return fs_ == FilterState(0)
? (noeps2_
? FilterState(0)
: (alleps2_ ? FilterState::NoState() : FilterState(1)))
: (fs_ == FilterState(1) ? FilterState(1)
: FilterState::NoState());
} else if (arc1->olabel == kNoLabel) { // Epsilon in FST2.
return fs_ == FilterState(0)
? (noeps1_
? FilterState(0)
: (alleps1_ ? FilterState::NoState() : FilterState(2)))
: (fs_ == FilterState(2) ? FilterState(2)
: FilterState::NoState());
} else if (arc1->olabel == 0) { // Epsilon in both.
return fs_ == FilterState(0) ? FilterState(0) : FilterState::NoState();
} else { // Both are non-epsilons.
return FilterState(0);
}
}
void FilterFinal(Weight *, Weight *) const {}
Matcher1 *GetMatcher1() { return matcher1_.get(); }
Matcher2 *GetMatcher2() { return matcher2_.get(); }
uint64 Properties(uint64 props) const { return props; }
private:
std::unique_ptr<Matcher1> matcher1_;
std::unique_ptr<Matcher2> matcher2_;
const FST1 &fst1_;
const FST2 &fst2_;
StateId s1_; // Current fst1_ state.
StateId s2_; // Current fst2_ state.
FilterState fs_; // Current filter state ID.
  bool alleps1_;  // Only epsilons (and non-final) leaving s1?
bool alleps2_; // Only epsilons (and non-final) leaving s2?
bool noeps1_; // No epsilons leaving s1?
bool noeps2_; // No epsilons leaving s2?
};
// This filter works with the MultiEpsMatcher to determine if multi-epsilons are
// preserved in the composition output (rather than rewritten as 0) and
// ensures correct properties.
template <class Filter>
class MultiEpsFilter {
public:
using Matcher1 = typename Filter::Matcher1;
using Matcher2 = typename Filter::Matcher2;
using FST1 = typename Filter::FST1;
using FST2 = typename Filter::FST2;
using FilterState = typename Filter::FilterState;
using Arc = typename Filter::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
MultiEpsFilter(const FST1 &fst1, const FST2 &fst2,
Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr,
bool keep_multi_eps = false)
: filter_(fst1, fst2, matcher1, matcher2),
keep_multi_eps_(keep_multi_eps) {}
MultiEpsFilter(const MultiEpsFilter<Filter> &filter, bool safe = false)
: filter_(filter.filter_, safe),
keep_multi_eps_(filter.keep_multi_eps_) {}
FilterState Start() const { return filter_.Start(); }
void SetState(StateId s1, StateId s2, const FilterState &fs) {
return filter_.SetState(s1, s2, fs);
}
FilterState FilterArc(Arc *arc1, Arc *arc2) const {
const auto fs = filter_.FilterArc(arc1, arc2);
if (keep_multi_eps_) {
if (arc1->olabel == kNoLabel) arc1->ilabel = arc2->ilabel;
if (arc2->ilabel == kNoLabel) arc2->olabel = arc1->olabel;
}
return fs;
}
void FilterFinal(Weight *w1, Weight *w2) const {
return filter_.FilterFinal(w1, w2);
}
Matcher1 *GetMatcher1() { return filter_.GetMatcher1(); }
Matcher2 *GetMatcher2() { return filter_.GetMatcher2(); }
uint64 Properties(uint64 iprops) const {
const auto oprops = filter_.Properties(iprops);
return oprops & kILabelInvariantProperties & kOLabelInvariantProperties;
}
private:
Filter filter_;
bool keep_multi_eps_;
};
} // namespace fst
#endif // FST_COMPOSE_FILTER_H_
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/framework/tensor.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stddef.h>
#include <iostream>
#include <string>
#include <vector>
#include "gsl/gsl"
#include "core/common/common.h"
#include "core/framework/allocator.h"
#include "core/framework/tensor_shape.h"
#include "core/framework/buffer_deleter.h"
#include "onnxruntime_config.h"
#include "core/framework/data_types.h"
#include "core/framework/data_types_internal.h"
namespace onnxruntime {
// TODO: ensure dtype_ != nullptr
#ifdef __GNUC__
#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif
#endif
/*
We want to keep tensor as simple as possible, it is just a placeholder
for a piece of memory, with additional shape information.
Memory is owned and managed by Executor / Workspace, so Tensor just uses
it, and won't do any allocation / release.
*/
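// A minimal usage sketch (illustrative only; "cpu_allocator" is a
// hypothetical AllocatorPtr obtained elsewhere):
//
//   TensorShape shape({2, 3});
//   auto t = Tensor::Create(DataTypeImpl::GetType<float>(), shape, cpu_allocator);
//   float* p = t->MutableData<float>();  // 6 floats, owned by the tensor.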
class Tensor final {
public:
static std::unique_ptr<Tensor> Create(MLDataType p_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator) {
return std::make_unique<Tensor>(p_type, shape, allocator);
}
static std::unique_ptr<Tensor> Create(MLDataType p_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& alloc, ptrdiff_t offset = 0) {
return std::make_unique<Tensor>(p_type, shape, p_data, alloc, offset);
}
Tensor() = default; // to allow creating vector<Tensor> to support seq(tensor)
/**
* Create tensor with given type, shape, pre-allocated memory and allocator info.
   * This function won't check if the preallocated buffer (p_data) has enough room for the shape.
* \param p_type Data type of the tensor
* \param shape Shape of the tensor
* \param p_data A preallocated buffer. Can be NULL if the shape is empty.
* Tensor does not own the data and will not delete it
* \param alloc Where the buffer('p_data') was allocated from
* \param offset Offset in bytes to start of Tensor within p_data.
*/
Tensor(MLDataType p_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& alloc,
ptrdiff_t offset = 0);
/**
   * Deprecated. The original design is that this Tensor class won't do any allocation / release.
   * However, this constructor will allocate the buffer for the shape, and do placement new if p_type is a string tensor.
*/
Tensor(MLDataType p_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator);
/**
* Create tensor with given type, shape, pre-allocated memory and allocator which will be used to free the pre-allocated memory.
   * This function won't check if the preallocated buffer (p_data) has enough room for the shape.
* However, this function will de-allocate the buffer upon the tensor getting destructed.
* \param p_type Data type of the tensor
* \param shape Shape of the tensor
   * \param p_data A preallocated buffer. Can be NULL if the shape is empty.
   *               The tensor releases it via 'deleter' upon destruction.
* \param deleter Allocator used to free the pre-allocated memory
* \param offset Offset in bytes to start of Tensor within p_data.
*/
Tensor(MLDataType p_type, const TensorShape& shape, void* p_data, std::shared_ptr<IAllocator> deleter,
ptrdiff_t offset = 0);
~Tensor();
  // Move is allowed
ORT_DISALLOW_COPY_AND_ASSIGNMENT(Tensor);
Tensor(Tensor&& other) noexcept;
Tensor& operator=(Tensor&& other) noexcept;
/**
Returns the data type.
*/
MLDataType DataType() const { return dtype_; }
/**
Returns the data type enum constant
@remarks Use utils::ToTensorProtoElementType<T> for comparison.
*/
int32_t GetElementType() const {
return dtype_->GetDataType();
}
  // Checks if the tensor contains string data. This is a separate
  // interface because it is frequently used.
bool IsDataTypeString() const {
return utils::IsPrimitiveDataType<std::string>(dtype_);
}
// Checks if the Tensor contains data type T
template <class T>
bool IsDataType() const {
return utils::IsPrimitiveDataType<T>(dtype_);
}
/**
Returns the shape of the tensor.
*/
const TensorShape& Shape() const noexcept { return shape_; }
/**
Returns the location of the tensor's memory
*/
const OrtMemoryInfo& Location() const { return alloc_info_; }
/**
May return nullptr if tensor size is zero
*/
template <typename T>
T* MutableData() {
// Type check
ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
"T ", "!=", dtype_);
return reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
}
/**
May return nullptr if tensor size is zero
*/
template <typename T>
gsl::span<T> MutableDataAsSpan() {
// Type check
ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
"T ", "!=", dtype_);
T* data = reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
return gsl::make_span(data, static_cast<size_t>(shape_.Size()));
}
template <typename T>
const T* Data() const {
// Type check
ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
"T ", "!=", dtype_);
return reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
}
template <typename T>
gsl::span<const T> DataAsSpan() const {
// Type check
ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
"T ", "!=", dtype_);
const T* data = reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
return gsl::make_span(data, static_cast<typename gsl::span<T>::index_type>(shape_.Size()));
}
void* MutableDataRaw(MLDataType type) {
ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
return static_cast<char*>(p_data_) + byte_offset_;
}
const void* DataRaw(MLDataType type) const {
ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
return static_cast<char*>(p_data_) + byte_offset_;
}
void* MutableDataRaw() noexcept {
return static_cast<char*>(p_data_) + byte_offset_;
}
const void* DataRaw() const noexcept {
return static_cast<char*>(p_data_) + byte_offset_;
}
bool OwnsBuffer() const noexcept {
return buffer_deleter_ != nullptr;
}
/**
* Resizes the tensor without touching underlying storage.
   * This requires the total size of the tensor to remain constant.
* @warning this function is NOT thread-safe.
*/
inline void Reshape(const TensorShape& new_shape) {
ORT_ENFORCE(shape_.Size() == new_shape.Size(),
"Tensor size (" + std::to_string(shape_.Size()) +
") != new size (" + std::to_string(new_shape.Size()) + ")");
shape_ = new_shape;
}
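  // For example, a {2, 3} tensor may be reshaped to {3, 2} or {6}, but not to
  // {4}, since the element count (6) must be preserved.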
/**
* Get the byte offset with respect to the p_data
* @warning this is a temporary solution for reusing the buffer bigger than needed.
* @warning use with caution - make sure you do boundary check before calling this method (see view.cc)
*/
inline ptrdiff_t ByteOffset() const {
return byte_offset_;
}
/**
* Set the byte offset with respect to the p_data
* @warning this is a temporary solution for reusing the buffer bigger than needed.
*/
inline void SetByteOffset(ptrdiff_t byte_offset) {
byte_offset_ = byte_offset;
}
/**
The number of bytes of data.
*/
size_t SizeInBytes() const;
// More API methods.
private:
void Init(MLDataType p_type,
const TensorShape& shape,
void* p_raw_data,
AllocatorPtr deleter,
ptrdiff_t offset = 0);
void ReleaseBuffer();
void* p_data_;
/**
if buffer_deleter_ is null, it means tensor does not own the buffer.
otherwise tensor will use the deleter to release the buffer when
tensor is released.
*/
AllocatorPtr buffer_deleter_;
TensorShape shape_;
const PrimitiveDataTypeBase* dtype_;
OrtMemoryInfo alloc_info_;
ptrdiff_t byte_offset_;
};
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
} // namespace onnxruntime
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/script/equivalent.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#ifndef FST_SCRIPT_EQUIVALENT_H_
#define FST_SCRIPT_EQUIVALENT_H_
#include <tuple>
#include <fst/equivalent.h>
#include <fst/script/arg-packs.h>
#include <fst/script/fst-class.h>
namespace fst {
namespace script {
using EquivalentInnerArgs = std::tuple<const FstClass &, const FstClass &,
float>;
using EquivalentArgs = WithReturnValue<bool, EquivalentInnerArgs>;
template <class Arc>
void Equivalent(EquivalentArgs *args) {
const Fst<Arc> &fst1 = *(std::get<0>(args->args).GetFst<Arc>());
const Fst<Arc> &fst2 = *(std::get<1>(args->args).GetFst<Arc>());
args->retval = Equivalent(fst1, fst2, std::get<2>(args->args));
}
bool Equivalent(const FstClass &fst1, const FstClass &fst2,
float delta = kDelta);
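// For example (a sketch; "a" and "b" are hypothetical FstClass instances of
// the same arc type):
//
//   bool eq = fst::script::Equivalent(a, b);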
} // namespace script
} // namespace fst
#endif // FST_SCRIPT_EQUIVALENT_H_
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/ci_scripts/electronjs_tflite-tests.sh | #!/bin/bash
set -xe
source $(dirname "$0")/all-vars.sh
source $(dirname "$0")/all-utils.sh
source $(dirname "$0")/asserts.sh
samplerate=$1
ldc93s1_sample_filename="LDC93S1_pcms16le_1_${samplerate}.wav"
model_source=${STT_TEST_MODEL}
model_name=$(basename "${model_source}")
download_data
node --version
npm --version
symlink_electron
export_node_bin_path
which electron
which node
if [ "${OS}" = "Linux" ]; then
export DISPLAY=':99.0'
sudo Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &
xvfb_process=$!
fi
node --version
stt --version
check_runtime_electronjs
run_electronjs_inference_tests
if [ "${OS}" = "Linux" ]; then
sleep 1
sudo kill -9 ${xvfb_process} || true
fi
| 0 |