repo_id | file_path | content | __index_level_0__
---|---|---|---|
coqui_public_repos/inference-engine/third_party/kenlm/util | coqui_public_repos/inference-engine/third_party/kenlm/util/double-conversion/CMakeLists.txt | # This CMake file was created by Lane Schwartz <dowobeha@gmail.com>
# Explicitly list the source files for this subdirectory
#
# If you add any source files to this subdirectory
# that should be included in the kenlm library,
# (this excludes any unit test files)
# you should add them to the following list:
#
# In order to allow CMake files in the parent directory
# to see this variable definition, we set PARENT_SCOPE.
#
# In order to set correct paths to these files
# when this variable is referenced by CMake files in the parent directory,
# we prefix all files with ${CMAKE_CURRENT_SOURCE_DIR}.
#
set(KENLM_UTIL_DOUBLECONVERSION_SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/bignum-dtoa.cc
${CMAKE_CURRENT_SOURCE_DIR}/bignum.cc
${CMAKE_CURRENT_SOURCE_DIR}/cached-powers.cc
${CMAKE_CURRENT_SOURCE_DIR}/diy-fp.cc
${CMAKE_CURRENT_SOURCE_DIR}/double-conversion.cc
${CMAKE_CURRENT_SOURCE_DIR}/fast-dtoa.cc
${CMAKE_CURRENT_SOURCE_DIR}/fixed-dtoa.cc
${CMAKE_CURRENT_SOURCE_DIR}/strtod.cc
PARENT_SCOPE)
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/lib/Makefile.am | AM_CPPFLAGS = -I$(srcdir)/../include $(ICU_CPPFLAGS)
lib_LTLIBRARIES = libfst.la
libfst_la_SOURCES = compat.cc flags.cc fst.cc fst-types.cc mapped-file.cc \
properties.cc symbol-table.cc symbol-table-ops.cc \
weight.cc util.cc
libfst_la_LDFLAGS = -version-info 10:0:0
libfst_la_LIBADD = $(DL_LIBS)
| 0 |
coqui_public_repos/STT-models/greek/itml | coqui_public_repos/STT-models/greek/itml/v0.1.1/alphabet.txt |
ΐ
ά
έ
ή
ί
α
β
γ
δ
ε
ζ
η
θ
ι
κ
λ
μ
ν
ξ
ο
π
ρ
ς
σ
τ
υ
φ
χ
ψ
ω
ϊ
ϋ
ό
ύ
ώ
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/test/weight-tester.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Utility class for regression testing of FST weights.
#ifndef FST_TEST_WEIGHT_TESTER_H_
#define FST_TEST_WEIGHT_TESTER_H_
#include <iostream>
#include <sstream>
#include <utility>
#include <fst/log.h>
#include <fst/weight.h>
namespace fst {
// This class tests a variety of identities and properties that must
// hold for the Weight class to be well-defined. It calls function object
// WEIGHT_GENERATOR to select weights that are used in the tests.
template <class Weight, class WeightGenerator>
class WeightTester {
public:
WeightTester(WeightGenerator generator)
: weight_generator_(std::move(generator)) {}
void Test(int iterations, bool test_division = true) {
for (int i = 0; i < iterations; ++i) {
// Selects the test weights.
const Weight w1(weight_generator_());
const Weight w2(weight_generator_());
const Weight w3(weight_generator_());
VLOG(1) << "weight type = " << Weight::Type();
VLOG(1) << "w1 = " << w1;
VLOG(1) << "w2 = " << w2;
VLOG(1) << "w3 = " << w3;
TestSemiring(w1, w2, w3);
if (test_division) TestDivision(w1, w2);
TestReverse(w1, w2);
TestEquality(w1, w2, w3);
TestIO(w1);
TestCopy(w1);
}
}
private:
// Note in the tests below we use ApproxEqual rather than == and add
// kDelta to inequalities where the weights might be inexact.
// Tests that (Plus, Times, Zero, One) defines a commutative semiring.
void TestSemiring(Weight w1, Weight w2, Weight w3) {
// Checks that the operations are closed.
CHECK(Plus(w1, w2).Member());
CHECK(Times(w1, w2).Member());
// Checks that the operations are associative.
CHECK(ApproxEqual(Plus(w1, Plus(w2, w3)), Plus(Plus(w1, w2), w3)));
CHECK(ApproxEqual(Times(w1, Times(w2, w3)), Times(Times(w1, w2), w3)));
// Checks the identity elements.
CHECK(Plus(w1, Weight::Zero()) == w1);
CHECK(Plus(Weight::Zero(), w1) == w1);
CHECK(Times(w1, Weight::One()) == w1);
CHECK(Times(Weight::One(), w1) == w1);
// Checks the NoWeight element.
CHECK(!Weight::NoWeight().Member());
CHECK(!Plus(w1, Weight::NoWeight()).Member());
CHECK(!Plus(Weight::NoWeight(), w1).Member());
CHECK(!Times(w1, Weight::NoWeight()).Member());
CHECK(!Times(Weight::NoWeight(), w1).Member());
// Checks that the operations commute.
CHECK(ApproxEqual(Plus(w1, w2), Plus(w2, w1)));
if (Weight::Properties() & kCommutative)
CHECK(ApproxEqual(Times(w1, w2), Times(w2, w1)));
// Checks Zero() is the annihilator.
CHECK(Times(w1, Weight::Zero()) == Weight::Zero());
CHECK(Times(Weight::Zero(), w1) == Weight::Zero());
// Check Power(w, 0) is Weight::One()
CHECK(Power(w1, 0) == Weight::One());
// Check Power(w, 1) is w
CHECK(Power(w1, 1) == w1);
// Check Power(w, 3) is Times(w, Times(w, w))
CHECK(Power(w1, 3) == Times(w1, Times(w1, w1)));
// Checks distributivity.
if (Weight::Properties() & kLeftSemiring) {
CHECK(ApproxEqual(Times(w1, Plus(w2, w3)),
Plus(Times(w1, w2), Times(w1, w3))));
}
if (Weight::Properties() & kRightSemiring)
CHECK(ApproxEqual(Times(Plus(w1, w2), w3),
Plus(Times(w1, w3), Times(w2, w3))));
if (Weight::Properties() & kIdempotent) CHECK(Plus(w1, w1) == w1);
if (Weight::Properties() & kPath)
CHECK(Plus(w1, w2) == w1 || Plus(w1, w2) == w2);
// Ensure weights form a left or right semiring.
CHECK(Weight::Properties() & (kLeftSemiring | kRightSemiring));
// Check when Times() is commutative that it is marked as a semiring.
if (Weight::Properties() & kCommutative)
CHECK(Weight::Properties() & kSemiring);
}
// Tests division operation.
void TestDivision(Weight w1, Weight w2) {
Weight p = Times(w1, w2);
if (Weight::Properties() & kLeftSemiring) {
Weight d = Divide(p, w1, DIVIDE_LEFT);
if (d.Member()) CHECK(ApproxEqual(p, Times(w1, d)));
CHECK(!Divide(w1, Weight::NoWeight(), DIVIDE_LEFT).Member());
CHECK(!Divide(Weight::NoWeight(), w1, DIVIDE_LEFT).Member());
}
if (Weight::Properties() & kRightSemiring) {
Weight d = Divide(p, w2, DIVIDE_RIGHT);
if (d.Member()) CHECK(ApproxEqual(p, Times(d, w2)));
CHECK(!Divide(w1, Weight::NoWeight(), DIVIDE_RIGHT).Member());
CHECK(!Divide(Weight::NoWeight(), w1, DIVIDE_RIGHT).Member());
}
if (Weight::Properties() & kCommutative) {
Weight d = Divide(p, w1, DIVIDE_RIGHT);
if (d.Member()) CHECK(ApproxEqual(p, Times(d, w1)));
}
}
// Tests reverse operation.
void TestReverse(Weight w1, Weight w2) {
typedef typename Weight::ReverseWeight ReverseWeight;
ReverseWeight rw1 = w1.Reverse();
ReverseWeight rw2 = w2.Reverse();
CHECK(rw1.Reverse() == w1);
CHECK(Plus(w1, w2).Reverse() == Plus(rw1, rw2));
CHECK(Times(w1, w2).Reverse() == Times(rw2, rw1));
}
// Tests == is an equivalence relation.
void TestEquality(Weight w1, Weight w2, Weight w3) {
// Checks reflexivity.
CHECK(w1 == w1);
// Checks symmetry.
CHECK((w1 == w2) == (w2 == w1));
// Checks transitivity.
if (w1 == w2 && w2 == w3) CHECK(w1 == w3);
}
// Tests binary serialization and textual I/O.
void TestIO(Weight w) {
// Tests binary I/O
{
std::ostringstream os;
w.Write(os);
os.flush();
std::istringstream is(os.str());
Weight v;
v.Read(is);
CHECK_EQ(w, v);
}
// Tests textual I/O.
{
std::ostringstream os;
os << w;
std::istringstream is(os.str());
Weight v(Weight::One());
is >> v;
CHECK(ApproxEqual(w, v));
}
}
// Tests copy constructor and assignment operator
void TestCopy(Weight w) {
Weight x = w;
CHECK(w == x);
x = Weight(w);
CHECK(w == x);
x.operator=(x);
CHECK(w == x);
}
// Generates weights used in testing.
WeightGenerator weight_generator_;
};
} // namespace fst
#endif // FST_TEST_WEIGHT_TESTER_H_
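// A hedged usage sketch (not part of the original header): driving the tester
// with TropicalWeight and a uniform random generator. The generator below is
// an assumption for illustration; the real OpenFst test binaries use
// purpose-built weight generators.
//
//   #include <random>
//   #include <fst/float-weight.h>
//
//   std::mt19937 rng(17);
//   std::uniform_real_distribution<float> dist(0.0f, 10.0f);
//   auto generator = [&rng, &dist]() { return fst::TropicalWeight(dist(rng)); };
//   fst::WeightTester<fst::TropicalWeight, decltype(generator)> tester(
//       std::move(generator));
//   tester.Test(/*iterations=*/1000);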
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstinfo.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_string(arc_filter, "any",
"Arc filter: one of:"
" \"any\", \"epsilon\", \"iepsilon\", \"oepsilon\"; "
"this only affects the counts of (co)accessible states, "
"connected states, and (strongly) connected components");
DEFINE_string(info_type, "auto",
"Info format: one of: \"auto\", \"long\", \"short\"");
DEFINE_bool(pipe, false, "Send info to stderr, input to stdout");
DEFINE_bool(test_properties, true,
"Compute property values (if unknown to FST)");
DEFINE_bool(fst_verify, true, "Verify FST sanity");
int fstinfo_main(int argc, char **argv);
int main(int argc, char **argv) { return fstinfo_main(argc, argv); }
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/fst-class.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// These classes are only recommended for use in high-level scripting
// applications. Most users should use the lower-level templated versions
// corresponding to these classes.
#include <istream>
#include <fst/log.h>
#include <fst/equal.h>
#include <fst/fst-decl.h>
#include <fst/reverse.h>
#include <fst/union.h>
#include <fst/script/fst-class.h>
#include <fst/script/register.h>
namespace fst {
namespace script {
// Registration.
REGISTER_FST_CLASSES(StdArc);
REGISTER_FST_CLASSES(LogArc);
REGISTER_FST_CLASSES(Log64Arc);
// FstClass methods.
namespace {
template <class F>
F *ReadFst(std::istream &istrm, const string &fname) {
if (!istrm) {
LOG(ERROR) << "ReadFst: Can't open file: " << fname;
return nullptr;
}
FstHeader hdr;
if (!hdr.Read(istrm, fname)) return nullptr;
const FstReadOptions read_options(fname, &hdr);
const auto &arc_type = hdr.ArcType();
static const auto *io_register = IORegistration<F>::Register::GetRegister();
const auto reader = io_register->GetReader(arc_type);
if (!reader) {
LOG(ERROR) << "ReadFst: Unknown arc type: " << arc_type;
return nullptr;
}
return reader(istrm, read_options);
}
} // namespace
FstClass *FstClass::Read(const string &fname) {
if (!fname.empty()) {
std::ifstream istrm(fname, std::ios_base::in | std::ios_base::binary);
return ReadFst<FstClass>(istrm, fname);
} else {
return ReadFst<FstClass>(std::cin, "standard input");
}
}
FstClass *FstClass::Read(std::istream &istrm, const string &source) {
return ReadFst<FstClass>(istrm, source);
}
bool FstClass::WeightTypesMatch(const WeightClass &weight,
const string &op_name) const {
if (WeightType() != weight.Type()) {
FSTERROR() << "FST and weight with non-matching weight types passed to "
<< op_name << ": " << WeightType() << " and " << weight.Type();
return false;
}
return true;
}
// MutableFstClass methods.
MutableFstClass *MutableFstClass::Read(const string &fname, bool convert) {
if (convert == false) {
if (!fname.empty()) {
std::ifstream in(fname, std::ios_base::in | std::ios_base::binary);
return ReadFst<MutableFstClass>(in, fname);
} else {
return ReadFst<MutableFstClass>(std::cin, "standard input");
}
} else { // Converts to VectorFstClass if not mutable.
std::unique_ptr<FstClass> ifst(FstClass::Read(fname));
if (!ifst) return nullptr;
if (ifst->Properties(kMutable, false) == kMutable) {
return static_cast<MutableFstClass *>(ifst.release());
} else {
return new VectorFstClass(*ifst.release());
}
}
}
// VectorFstClass methods.
VectorFstClass *VectorFstClass::Read(const string &fname) {
if (!fname.empty()) {
std::ifstream in(fname, std::ios_base::in | std::ios_base::binary);
return ReadFst<VectorFstClass>(in, fname);
} else {
return ReadFst<VectorFstClass>(std::cin, "standard input");
}
}
IORegistration<VectorFstClass>::Entry GetVFSTRegisterEntry(
const string &arc_type) {
static const auto *io_register =
IORegistration<VectorFstClass>::Register::GetRegister();
return io_register->GetEntry(arc_type);
}
VectorFstClass::VectorFstClass(const string &arc_type)
: MutableFstClass(GetVFSTRegisterEntry(arc_type).creator()) {
if (Properties(kError, true) == kError) {
FSTERROR() << "VectorFstClass: Unknown arc type: " << arc_type;
}
}
VectorFstClass::VectorFstClass(const FstClass &other)
: MutableFstClass(GetVFSTRegisterEntry(other.ArcType()).converter(other)) {}
} // namespace script
} // namespace fst
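// A hedged usage sketch (not part of the original file): reading an FST of
// unknown arc type through the scripting layer defined above. The file name
// is an assumption for illustration.
//
//   std::unique_ptr<fst::script::FstClass> f(
//       fst::script::FstClass::Read("model.fst"));
//   if (f) std::cout << f->ArcType() << std::endl;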
| 0 |
coqui_public_repos/inference-engine/third_party | coqui_public_repos/inference-engine/third_party/tensorflow/mfcc_dct.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Basic minimal DCT class for MFCC speech processing.
#ifndef TENSORFLOW_CORE_KERNELS_MFCC_DCT_H_
#define TENSORFLOW_CORE_KERNELS_MFCC_DCT_H_
#include <vector>
#define TF_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
void operator=(const TypeName&) = delete
namespace tensorflow {
class MfccDct {
public:
MfccDct();
bool Initialize(int input_length, int coefficient_count);
void Compute(const std::vector<double>& input,
std::vector<double>* output) const;
private:
bool initialized_;
int coefficient_count_;
int input_length_;
std::vector<std::vector<double> > cosines_;
TF_DISALLOW_COPY_AND_ASSIGN(MfccDct);
};
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_MFCC_DCT_H_
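// A hedged usage sketch (not part of the original header): computing 13 DCT
// coefficients from an 80-bin mel spectrum, matching the interface above.
//
//   tensorflow::MfccDct dct;
//   if (dct.Initialize(/*input_length=*/80, /*coefficient_count=*/13)) {
//     std::vector<double> mel(80, 1.0);
//     std::vector<double> coeffs;
//     dct.Compute(mel, &coeffs);  // coeffs.size() == 13 on success
//   }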
| 0 |
coqui_public_repos/TTS/tests | coqui_public_repos/TTS/tests/xtts_tests/test_xtts_v2-0_gpt_train.py | import os
import shutil
import torch
from trainer import Trainer, TrainerArgs
from tests import get_tests_output_path
from TTS.config.shared_configs import BaseDatasetConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.layers.xtts.dvae import DiscreteVAE
from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig
config_dataset = BaseDatasetConfig(
formatter="ljspeech",
dataset_name="ljspeech",
path="tests/data/ljspeech/",
meta_file_train="metadata.csv",
meta_file_val="metadata.csv",
language="en",
)
DATASETS_CONFIG_LIST = [config_dataset]
# Logging parameters
RUN_NAME = "GPT_XTTS_LJSpeech_FT"
PROJECT_NAME = "XTTS_trainer"
DASHBOARD_LOGGER = "tensorboard"
LOGGER_URI = None
OUT_PATH = os.path.join(get_tests_output_path(), "train_outputs", "xtts_tests")
os.makedirs(OUT_PATH, exist_ok=True)
# Create the DVAE checkpoint and mel_norms at test time
# DVAE parameters: for training we need the DVAE to extract the DVAE tokens, so you must provide the paths for this model
DVAE_CHECKPOINT = os.path.join(OUT_PATH, "dvae.pth") # DVAE checkpoint
# Mel spectrogram norms, required for dvae mel spectrogram extraction
MEL_NORM_FILE = os.path.join(OUT_PATH, "mel_stats.pth")
dvae = DiscreteVAE(
channels=80,
normalization=None,
positional_dims=1,
num_tokens=8192,
codebook_dim=512,
hidden_dim=512,
num_resnet_blocks=3,
kernel_size=3,
num_layers=2,
use_transposed_convs=False,
)
torch.save(dvae.state_dict(), DVAE_CHECKPOINT)
mel_stats = torch.ones(80)
torch.save(mel_stats, MEL_NORM_FILE)
# XTTS transfer learning parameters: you need to provide the path of the XTTS model checkpoint that you want to fine-tune.
TOKENIZER_FILE = "tests/inputs/xtts_vocab.json" # vocab.json file
XTTS_CHECKPOINT = None # "/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_style_emb_repetition_fix_gt/132500_gpt_ema_coqui_tts_with_enhanced_hifigan.pth" # model.pth file
# Training sentence generation
SPEAKER_REFERENCE = [
"tests/data/ljspeech/wavs/LJ001-0002.wav"
] # speaker reference to be used in training test sentences
LANGUAGE = config_dataset.language
# Training Parameters
OPTIMIZER_WD_ONLY_ON_WEIGHTS = True  # set to False for multi-GPU training
START_WITH_EVAL = False  # if True it will start with evaluation
BATCH_SIZE = 2 # set here the batch size
GRAD_ACUMM_STEPS = 1 # set here the grad accumulation steps
# Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE, but then set GRAD_ACUMM_STEPS accordingly.
# init args and config
model_args = GPTArgs(
max_conditioning_length=132300, # 6 secs
min_conditioning_length=66150, # 3 secs
debug_loading_failures=False,
max_wav_length=255995, # ~11.6 seconds
max_text_length=200,
mel_norm_file=MEL_NORM_FILE,
dvae_checkpoint=DVAE_CHECKPOINT,
xtts_checkpoint=XTTS_CHECKPOINT, # checkpoint path of the model that you want to fine-tune
tokenizer_file=TOKENIZER_FILE,
gpt_num_audio_tokens=8194,
gpt_start_audio_token=8192,
gpt_stop_audio_token=8193,
gpt_use_masking_gt_prompt_approach=True,
gpt_use_perceiver_resampler=True,
)
audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000)
config = GPTTrainerConfig(
epochs=1,
output_path=OUT_PATH,
model_args=model_args,
run_name=RUN_NAME,
project_name=PROJECT_NAME,
run_description="GPT XTTS training",
dashboard_logger=DASHBOARD_LOGGER,
logger_uri=LOGGER_URI,
audio=audio_config,
batch_size=BATCH_SIZE,
batch_group_size=48,
eval_batch_size=BATCH_SIZE,
num_loader_workers=8,
eval_split_max_size=256,
print_step=50,
plot_step=100,
log_model_step=1000,
save_step=10000,
save_n_checkpoints=1,
save_checkpoints=True,
# target_loss="loss",
print_eval=False,
# Optimizer values like Tortoise: PyTorch implementation with modifications to not apply weight decay to non-weight parameters.
optimizer="AdamW",
optimizer_wd_only_on_weights=OPTIMIZER_WD_ONLY_ON_WEIGHTS,
optimizer_params={"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2},
lr=5e-06, # learning rate
lr_scheduler="MultiStepLR",
# adjusted accordingly for the new step scheme
lr_scheduler_params={"milestones": [50000 * 18, 150000 * 18, 300000 * 18], "gamma": 0.5, "last_epoch": -1},
test_sentences=[
{
"text": "This cake is great. It's so delicious and moist.",
"speaker_wav": SPEAKER_REFERENCE,
"language": LANGUAGE,
},
],
)
# init the model from config
model = GPTTrainer.init_from_config(config)
# load training samples
train_samples, eval_samples = load_tts_samples(
DATASETS_CONFIG_LIST,
eval_split=True,
eval_split_max_size=config.eval_split_max_size,
eval_split_size=config.eval_split_size,
)
# init the trainer and 🚀
trainer = Trainer(
TrainerArgs(
restore_path=None,  # the XTTS checkpoint is restored via the xtts_checkpoint key, so there is no need to restore it using the Trainer restore_path parameter
skip_train_epoch=False,
start_with_eval=True,
grad_accum_steps=GRAD_ACUMM_STEPS,
),
config,
output_path=OUT_PATH,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
)
trainer.fit()
# remove output path
shutil.rmtree(OUT_PATH)
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/test-properties.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions to manipulate and test property bits.
#ifndef FST_TEST_PROPERTIES_H_
#define FST_TEST_PROPERTIES_H_
#include <unordered_set>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/connect.h>
#include <fst/dfs-visit.h>
DECLARE_bool(fst_verify_properties);
namespace fst {
// namespace internal {
// For a binary property, the bit is always returned set. For a trinary (i.e.,
// two-bit) property, both bits are returned set iff either corresponding input
// bit is set.
inline uint64_t KnownProperties(uint64_t props) {
return kBinaryProperties | (props & kTrinaryProperties) |
((props & kPosTrinaryProperties) << 1) |
((props & kNegTrinaryProperties) >> 1);
}
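// A brief illustration of the shifts above (bit layout per properties.h, where
// each trinary pair places the negative bit one position above the positive
// bit, e.g. kNotAcceptor == kAcceptor << 1): setting only the positive bit
// marks the whole pair as known.
//
//   uint64_t props = kAcceptor;                // "is an acceptor" asserted
//   uint64_t known = KnownProperties(props);   // reports both kAcceptor and
//                                              // kNotAcceptor as known bits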
// Tests compatibility between two sets of properties.
inline bool CompatProperties(uint64_t props1, uint64_t props2) {
const auto known_props1 = KnownProperties(props1);
const auto known_props2 = KnownProperties(props2);
const auto known_props = known_props1 & known_props2;
const auto incompat_props = (props1 & known_props) ^ (props2 & known_props);
if (incompat_props) {
uint64_t prop = 1;
for (int i = 0; i < 64; ++i, prop <<= 1) {
if (prop & incompat_props) {
LOG(ERROR) << "CompatProperties: Mismatch: " << PropertyNames[i]
<< ": props1 = " << (props1 & prop ? "true" : "false")
<< ", props2 = " << (props2 & prop ? "true" : "false");
}
}
return false;
} else {
return true;
}
}
// Computes FST property values defined in properties.h. The value of each
// property indicated in the mask will be determined and returned (these will
// never be unknown here). In the course of determining the properties
// specifically requested in the mask, certain other properties may be
// determined (those with little additional expense) and their values will be
// returned as well. The complete set of known properties (whether true or
// false) determined by this operation will be assigned to the value pointed
// to by KNOWN. If 'use_stored' is true, pre-computed FST properties may be used
// when possible. 'mask & required_mask' is used to determine whether the stored
// properties can be used. This routine is seldom called directly; instead it is
// used to implement fst.Properties(mask, true).
template <class Arc>
uint64_t ComputeProperties(const Fst<Arc> &fst, uint64_t mask, uint64_t *known,
bool use_stored) {
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
const auto fst_props = fst.Properties(kFstProperties, false); // FST-stored.
// Check stored FST properties first if allowed.
if (use_stored) {
const auto known_props = KnownProperties(fst_props);
// If FST contains required info, return it.
if ((known_props & mask) == mask) {
if (known) *known = known_props;
return fst_props;
}
}
// Computes (trinary) properties explicitly.
// Initialize with binary properties (already known).
uint64_t comp_props = fst_props & kBinaryProperties;
// Computes these trinary properties with a DFS. We compute only those that
// need a DFS here, since we otherwise prefer to avoid a DFS, whose stack
// could grow large.
uint64_t dfs_props = kCyclic | kAcyclic | kInitialCyclic | kInitialAcyclic |
kAccessible | kNotAccessible | kCoAccessible |
kNotCoAccessible;
std::vector<StateId> scc;
if (mask & (dfs_props | kWeightedCycles | kUnweightedCycles)) {
SccVisitor<Arc> scc_visitor(&scc, nullptr, nullptr, &comp_props);
DfsVisit(fst, &scc_visitor);
}
// Computes any remaining trinary properties via state and arc iterations.
if (mask & ~(kBinaryProperties | dfs_props)) {
comp_props |= kAcceptor | kNoEpsilons | kNoIEpsilons | kNoOEpsilons |
kILabelSorted | kOLabelSorted | kUnweighted | kTopSorted |
kString;
if (mask & (kIDeterministic | kNonIDeterministic)) {
comp_props |= kIDeterministic;
}
if (mask & (kODeterministic | kNonODeterministic)) {
comp_props |= kODeterministic;
}
if (mask & (dfs_props | kWeightedCycles | kUnweightedCycles)) {
comp_props |= kUnweightedCycles;
}
std::unique_ptr<std::unordered_set<Label>> ilabels;
std::unique_ptr<std::unordered_set<Label>> olabels;
StateId nfinal = 0;
for (StateIterator<Fst<Arc>> siter(fst); !siter.Done(); siter.Next()) {
StateId s = siter.Value();
Arc prev_arc;
// Creates these only if we need to.
if (mask & (kIDeterministic | kNonIDeterministic)) {
ilabels.reset(new std::unordered_set<Label>());
}
if (mask & (kODeterministic | kNonODeterministic)) {
olabels.reset(new std::unordered_set<Label>());
}
bool first_arc = true;
for (ArcIterator<Fst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) {
const auto &arc = aiter.Value();
if (ilabels && ilabels->find(arc.ilabel) != ilabels->end()) {
comp_props |= kNonIDeterministic;
comp_props &= ~kIDeterministic;
}
if (olabels && olabels->find(arc.olabel) != olabels->end()) {
comp_props |= kNonODeterministic;
comp_props &= ~kODeterministic;
}
if (arc.ilabel != arc.olabel) {
comp_props |= kNotAcceptor;
comp_props &= ~kAcceptor;
}
if (arc.ilabel == 0 && arc.olabel == 0) {
comp_props |= kEpsilons;
comp_props &= ~kNoEpsilons;
}
if (arc.ilabel == 0) {
comp_props |= kIEpsilons;
comp_props &= ~kNoIEpsilons;
}
if (arc.olabel == 0) {
comp_props |= kOEpsilons;
comp_props &= ~kNoOEpsilons;
}
if (!first_arc) {
if (arc.ilabel < prev_arc.ilabel) {
comp_props |= kNotILabelSorted;
comp_props &= ~kILabelSorted;
}
if (arc.olabel < prev_arc.olabel) {
comp_props |= kNotOLabelSorted;
comp_props &= ~kOLabelSorted;
}
}
if (arc.weight != Weight::One() && arc.weight != Weight::Zero()) {
comp_props |= kWeighted;
comp_props &= ~kUnweighted;
if ((comp_props & kUnweightedCycles) &&
scc[s] == scc[arc.nextstate]) {
comp_props |= kWeightedCycles;
comp_props &= ~kUnweightedCycles;
}
}
if (arc.nextstate <= s) {
comp_props |= kNotTopSorted;
comp_props &= ~kTopSorted;
}
if (arc.nextstate != s + 1) {
comp_props |= kNotString;
comp_props &= ~kString;
}
prev_arc = arc;
first_arc = false;
if (ilabels) ilabels->insert(arc.ilabel);
if (olabels) olabels->insert(arc.olabel);
}
if (nfinal > 0) { // Final state not last.
comp_props |= kNotString;
comp_props &= ~kString;
}
const auto final_weight = fst.Final(s);
if (final_weight != Weight::Zero()) { // Final state.
if (final_weight != Weight::One()) {
comp_props |= kWeighted;
comp_props &= ~kUnweighted;
}
++nfinal;
} else { // Non-final state.
if (fst.NumArcs(s) != 1) {
comp_props |= kNotString;
comp_props &= ~kString;
}
}
}
if (fst.Start() != kNoStateId && fst.Start() != 0) {
comp_props |= kNotString;
comp_props &= ~kString;
}
}
if (known) *known = KnownProperties(comp_props);
return comp_props;
}
// This is a wrapper around ComputeProperties that will cause a fatal error if
// the stored properties and the computed properties are incompatible when
// FLAGS_fst_verify_properties is true. This routine is seldom called directly;
// instead it is used to implement fst.Properties(mask, true).
template <class Arc>
uint64_t TestProperties(const Fst<Arc> &fst, uint64_t mask, uint64_t *known) {
if (FLAGS_fst_verify_properties) {
const auto stored_props = fst.Properties(kFstProperties, false);
const auto computed_props = ComputeProperties(fst, mask, known, false);
if (!CompatProperties(stored_props, computed_props)) {
FSTERROR() << "TestProperties: stored FST properties incorrect"
<< " (stored: props1, computed: props2)";
}
return computed_props;
} else {
return ComputeProperties(fst, mask, known, true);
}
}
// If all the properties of 'fst' corresponding to 'check_mask' are known,
// returns the stored properties. Otherwise, the properties corresponding to
// both 'check_mask' and 'test_mask' are computed. This is used to check for
// newly-added properties that might not be set in old binary files.
template <class Arc>
uint64_t CheckProperties(const Fst<Arc> &fst, uint64_t check_mask,
uint64_t test_mask) {
auto props = fst.Properties(kFstProperties, false);
if (FLAGS_fst_verify_properties) {
props = TestProperties(fst, check_mask | test_mask, nullptr);
} else if ((KnownProperties(props) & check_mask) != check_mask) {
props = ComputeProperties(fst, check_mask | test_mask, nullptr, false);
}
return props & (check_mask | test_mask);
}
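// A hedged usage sketch (illustration only): querying acceptor-ness through
// this machinery, computing the property only if it is not already stored.
// StdVectorFst is assumed to come from vector-fst.h.
//
//   StdVectorFst f;  // assumed to be built elsewhere
//   uint64_t props = CheckProperties(f, kAcceptor | kNotAcceptor, 0);
//   bool is_acceptor = props & kAcceptor;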
//} // namespace internal
} // namespace fst
#endif // FST_TEST_PROPERTIES_H_
| 0 |
coqui_public_repos/stt-model-manager | coqui_public_repos/stt-model-manager/coqui_stt_model_manager/server.py | """Server hosting the STT UI"""
import json
import logging
import os
import sys
import threading
import webbrowser
from collections import deque
from datetime import datetime, timedelta
from pathlib import Path
from queue import Queue
from typing import Optional, Tuple
import numpy as np
from engineio.payload import Payload
from flask import Flask, jsonify, redirect, render_template, request, session, url_for
from flask_cors import CORS
from flask_socketio import SocketIO
from stt import Model
from webrtcvad import Vad
from .modelmanager import ModelCard, ModelManager
Payload.max_decode_packets = 10000
app = Flask(__name__)
CORS(app, origins=["https://coqui.ai"])
socketio = SocketIO(app)
VAD = Vad(3) # Very aggressive
SILENCE_THRESHOLD = timedelta(milliseconds=200)
_server_initialized = threading.Event()
def is_debug() -> bool:
return "COQUI_STT_MODEL_MANAGER_DEBUG" in os.environ or "--debug" in sys.argv
def open_folder(path: Path):
assert path.is_dir()
assert path.is_absolute()
webbrowser.open(f"file://{path}")
def get_server_hostport() -> Tuple[str, int]:
_server_initialized.wait()
assert (
"SERVER_HOST" in app.config
), "server not initialized (should never happen due to wait above)"
assert (
"SERVER_PORT" in app.config
), "server not initialized (should never happen due to wait above)"
return (app.config["SERVER_HOST"], app.config["SERVER_PORT"])
@app.route("/")
def index():
host, port = get_server_hostport()
current_installs = [
task.to_dict()
for task in app.config["MODEL_MANAGER"].install_tasks.values()
if task.total_progress < 100
]
print(f"Current installs: {current_installs}")
return render_template(
"index.html",
model_zoo_url=f"https://coqui.ai/models?callback_url=http://{host}:{port}/install_model&prefer_tflite=1",
installed_models=list(app.config["MODEL_MANAGER"].list_models()),
models_being_installed=current_installs,
)
@app.route("/install_model", methods=["POST"])
def install_model():
print(f"Install model got data: {request.data}")
model_card = json.loads(request.data)
app.config["MODEL_MANAGER"].download_model(model_card)
return redirect(url_for("index"))
@app.route("/show_model_files/<string:model_name>")
def show_model_files(model_name):
if model_name not in app.config["MODEL_MANAGER"].models_dict():
return (404, "Not found")
model_card = app.config["MODEL_MANAGER"].models_dict()[model_name]
open_folder(Path(model_card.acoustic_path).parent)
return redirect(url_for("index"))
@socketio.on("start")
def on_connect(model_name):
print(f"Starting session for model {model_name}")
model_card = app.config["MODEL_MANAGER"].models_dict()[model_name]
instance = TranscriptionInstance(request.sid, model_card)
instance.start()
session[request.sid] = instance
@socketio.on("stream-data")
def on_stream_data(data):
instance = session[request.sid]
instance.process_data(data)
@socketio.on("stream-reset")
def on_stream_reset():
instance = session[request.sid]
instance.stream_reset()
@socketio.on("stream-intermediate")
def on_stream_intermediate():
instance = session[request.sid]
instance.stream_intermediate()
def _reset_silence_buffers() -> deque:
return deque(maxlen=3)
class TranscriptionInstance(threading.Thread):
"""Thread responsible for transcribing data for a single transcription instance
(which corresponds to a SocketIO session - see `on_connect`).
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, session_id: str, model_card: ModelCard):
super().__init__(daemon=True)
self.sid = session_id
self.model_card = model_card
self.model = None
self.stream = None
self.recorded_chunks = 0
self.silence_start = None
self.silence_buffers: deque = _reset_silence_buffers()
self.queue: Queue = Queue()
def process_data(self, data):
self.queue.put(("data", data))
def _process_data(self, data):
if VAD.is_speech(data, 16000):
self._process_voice(data)
else:
self._process_silence(data)
def stream_reset(self):
self.queue.put(("reset", None))
def _stream_reset(self):
print(f"[{self.sid}:reset]")
self.stream.finishStream() # ignore results
self.stream = self.model.createStream()
self.recorded_chunks = 0
self.silence_start = None
def stream_intermediate(self):
self.queue.put(("intermediate", None))
def _stream_intermediate(self):
result = self.stream.intermediateDecode()
if result:
socketio.emit("intermediate", {"text": result}, to=self.sid)
def _process_voice(self, data):
data = np.frombuffer(data, np.int16)
self.silence_start = None
if self.recorded_chunks == 0:
print(f"\n[{self.sid}:start]", end="", flush=True) # recording started
else:
print("=", end="", flush=True) # still recording
self.recorded_chunks += 1
data_with_silence = self._add_buffered_silence(data)
self.silence_buffers = _reset_silence_buffers()
self.stream.feedAudioContent(data_with_silence)
def _add_buffered_silence(self, data):
return np.concatenate((*self.silence_buffers, data))
def _process_silence(self, data):
data = np.frombuffer(data, np.int16)
if self.recorded_chunks > 0: # recording is on
print("-", end="", flush=True) # silence detected while recording
self.stream.feedAudioContent(data)
if self.silence_start is None:
self.silence_start = datetime.now()
else:
now = datetime.now()
if now - self.silence_start > SILENCE_THRESHOLD:
self.silence_start = None
print(f"[{self.sid}:end]")
result = self.stream.finishStream()
self.stream = self.model.createStream()
self.silence_buffers = _reset_silence_buffers()
if result:
print(f"Recognized text: {result} (len={len(result)})")
socketio.emit("recognize", {"text": result}, to=self.sid)
else:
print(".", end="", flush=True) # silence detected while not recording
# VAD has a tendency to cut the first bit of audio data from the
# start of a recording so keep a buffer of that first bit of audio
# and reinsert it at the beginning of the recording.
self.silence_buffers.append(data)
def exit(self):
self.queue.put(("exit", None))
def run(self):
print(f"Creating model instance from {self.model_card.acoustic_path}")
self.model = Model(str(self.model_card.acoustic_path))
if self.model_card.scorer_path:
print(f"Enabling external scorer from {self.model_card.scorer_path}")
self.model.enableExternalScorer(str(self.model_card.scorer_path))
self.stream = self.model.createStream()
while True:
cmd, data = self.queue.get()
if cmd == "exit":
break
if cmd == "data":
self._process_data(data)
elif cmd == "reset":
self._stream_reset()
elif cmd == "intermediate":
self._stream_intermediate()
@app.route("/installs_progress")
def get_progress_for_install():
tasks = [
task.to_dict() for task in app.config["MODEL_MANAGER"].install_tasks.values()
]
return jsonify(tasks)
@app.route("/transcribe/<string:model_name>")
def transcribe_with_model(model_name: str):
if model_name not in app.config["MODEL_MANAGER"].models_dict():
return (404, "Model not found")
model_card = app.config["MODEL_MANAGER"].models_dict()[model_name]
scorer_basename = ""
if model_card.scorer_path:
scorer_basename = Path(model_card.scorer_path).stem
return render_template(
"transcribe.html", model_card=model_card, scorer_basename=scorer_basename
)
def build_app(
host: str = "127.0.0.1",
port: int = 38450,
testing: bool = False,
install_dir: Optional[Path] = None,
):
if not is_debug():
werkzeug_log = logging.getLogger("werkzeug")
werkzeug_log.setLevel(logging.ERROR)
if testing:
app.config["TESTING"] = True
app.config["MODEL_MANAGER"] = ModelManager(install_dir)
app.config["SERVER_HOST"] = host
app.config["SERVER_PORT"] = port
app.secret_key = b"aeiou"
_server_initialized.set()
# Migrations
app.config["MODEL_MANAGER"].maybe_upgrade_protobuf()
return app
def start_app(app_instance: Flask):
host, port = get_server_hostport()
socketio.run(
app_instance,
host=host,
port=port,
debug=is_debug(),
use_reloader=is_debug(), # Disable reloader to avoid problems when running the server from a thread
)
| 0 |
coqui_public_repos/STT/native_client/kenlm/lm | coqui_public_repos/STT/native_client/kenlm/lm/common/renumber.cc | #include "renumber.hh"
#include "ngram.hh"
#include "../../util/stream/stream.hh"
namespace lm {
void Renumber::Run(const util::stream::ChainPosition &position) {
for (util::stream::Stream stream(position); stream; ++stream) {
NGramHeader gram(stream.Get(), order_);
for (WordIndex *w = gram.begin(); w != gram.end(); ++w) {
*w = new_numbers_[*w];
}
}
}
} // namespace lm
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/INSTALL | Installation Instructions
*************************
Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005,
2006, 2007 Free Software Foundation, Inc.
This file is free documentation; the Free Software Foundation gives
unlimited permission to copy, distribute and modify it.
Basic Installation
==================
Briefly, the shell commands `./configure; make; make install' should
configure, build, and install this package. The following
more-detailed instructions are generic; see the `README' file for
instructions specific to this package.
The `configure' shell script attempts to guess correct values for
various system-dependent variables used during compilation. It uses
those values to create a `Makefile' in each directory of the package.
It may also create one or more `.h' files containing system-dependent
definitions. Finally, it creates a shell script `config.status' that
you can run in the future to recreate the current configuration, and a
file `config.log' containing compiler output (useful mainly for
debugging `configure').
It can also use an optional file (typically called `config.cache'
and enabled with `--cache-file=config.cache' or simply `-C') that saves
the results of its tests to speed up reconfiguring. Caching is
disabled by default to prevent problems with accidental use of stale
cache files.
If you need to do unusual things to compile the package, please try
to figure out how `configure' could check whether to do them, and mail
diffs or instructions to the address given in the `README' so they can
be considered for the next release. If you are using the cache, and at
some point `config.cache' contains results you don't want to keep, you
may remove or edit it.
The file `configure.ac' (or `configure.in') is used to create
`configure' by a program called `autoconf'. You need `configure.ac' if
you want to change it or regenerate `configure' using a newer version
of `autoconf'.
The simplest way to compile this package is:
1. `cd' to the directory containing the package's source code and type
`./configure' to configure the package for your system.
Running `configure' might take a while. While running, it prints
some messages telling which features it is checking for.
2. Type `make' to compile the package.
3. Optionally, type `make check' to run any self-tests that come with
the package.
4. Type `make install' to install the programs and any data files and
documentation.
5. You can remove the program binaries and object files from the
source code directory by typing `make clean'. To also remove the
files that `configure' created (so you can compile the package for
a different kind of computer), type `make distclean'. There is
also a `make maintainer-clean' target, but that is intended mainly
for the package's developers. If you use it, you may have to get
all sorts of other programs in order to regenerate files that came
with the distribution.
6. Often, you can also type `make uninstall' to remove the installed
files again.
Compilers and Options
=====================
Some systems require unusual options for compilation or linking that the
`configure' script does not know about. Run `./configure --help' for
details on some of the pertinent environment variables.
You can give `configure' initial values for configuration parameters
by setting variables in the command line or in the environment. Here
is an example:
./configure CC=c99 CFLAGS=-g LIBS=-lposix
*Note Defining Variables::, for more details.
Compiling For Multiple Architectures
====================================
You can compile the package for more than one kind of computer at the
same time, by placing the object files for each architecture in their
own directory. To do this, you can use GNU `make'. `cd' to the
directory where you want the object files and executables to go and run
the `configure' script. `configure' automatically checks for the
source code in the directory that `configure' is in and in `..'.
With a non-GNU `make', it is safer to compile the package for one
architecture at a time in the source code directory. After you have
installed the package for one architecture, use `make distclean' before
reconfiguring for another architecture.
Installation Names
==================
By default, `make install' installs the package's commands under
`/usr/local/bin', include files under `/usr/local/include', etc. You
can specify an installation prefix other than `/usr/local' by giving
`configure' the option `--prefix=PREFIX'.
You can specify separate installation prefixes for
architecture-specific files and architecture-independent files. If you
pass the option `--exec-prefix=PREFIX' to `configure', the package uses
PREFIX as the prefix for installing programs and libraries.
Documentation and other data files still use the regular prefix.
In addition, if you use an unusual directory layout you can give
options like `--bindir=DIR' to specify different values for particular
kinds of files. Run `configure --help' for a list of the directories
you can set and what kinds of files go in them.
If the package supports it, you can cause programs to be installed
with an extra prefix or suffix on their names by giving `configure' the
option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
Optional Features
=================
Some packages pay attention to `--enable-FEATURE' options to
`configure', where FEATURE indicates an optional part of the package.
They may also pay attention to `--with-PACKAGE' options, where PACKAGE
is something like `gnu-as' or `x' (for the X Window System). The
`README' should mention any `--enable-' and `--with-' options that the
package recognizes.
For packages that use the X Window System, `configure' can usually
find the X include and library files automatically, but if it doesn't,
you can use the `configure' options `--x-includes=DIR' and
`--x-libraries=DIR' to specify their locations.
Specifying the System Type
==========================
There may be some features `configure' cannot figure out automatically,
but needs to determine by the type of machine the package will run on.
Usually, assuming the package is built to be run on the _same_
architectures, `configure' can figure that out, but if it prints a
message saying it cannot guess the machine type, give it the
`--build=TYPE' option. TYPE can either be a short name for the system
type, such as `sun4', or a canonical name which has the form:
CPU-COMPANY-SYSTEM
where SYSTEM can have one of these forms:
OS KERNEL-OS
See the file `config.sub' for the possible values of each field. If
`config.sub' isn't included in this package, then this package doesn't
need to know the machine type.
If you are _building_ compiler tools for cross-compiling, you should
use the option `--target=TYPE' to select the type of system they will
produce code for.
If you want to _use_ a cross compiler, that generates code for a
platform different from the build platform, you should specify the
"host" platform (i.e., that on which the generated programs will
eventually be run) with `--host=TYPE'.
Sharing Defaults
================
If you want to set default values for `configure' scripts to share, you
can create a site shell script called `config.site' that gives default
values for variables like `CC', `cache_file', and `prefix'.
`configure' looks for `PREFIX/share/config.site' if it exists, then
`PREFIX/etc/config.site' if it exists. Or, you can set the
`CONFIG_SITE' environment variable to the location of the site script.
A warning: not all `configure' scripts look for a site script.
Defining Variables
==================
Variables not defined in a site shell script can be set in the
environment passed to `configure'. However, some packages may run
configure again during the build, and the customized values of these
variables may be lost. In order to avoid this problem, you should set
them in the `configure' command line, using `VAR=value'. For example:
./configure CC=/usr/local2/bin/gcc
causes the specified `gcc' to be used as the C compiler (unless it is
overridden in the site shell script).
Unfortunately, this technique does not work for `CONFIG_SHELL' due to
an Autoconf bug. Until the bug is fixed you can use this workaround:
CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash
`configure' Invocation
======================
`configure' recognizes the following options to control how it operates.
`--help'
`-h'
Print a summary of the options to `configure', and exit.
`--version'
`-V'
Print the version of Autoconf used to generate the `configure'
script, and exit.
`--cache-file=FILE'
Enable the cache: use and save the results of the tests in FILE,
traditionally `config.cache'. FILE defaults to `/dev/null' to
disable caching.
`--config-cache'
`-C'
Alias for `--cache-file=config.cache'.
`--quiet'
`--silent'
`-q'
Do not print messages saying which checks are being made. To
suppress all normal output, redirect it to `/dev/null' (any error
messages will still be shown).
`--srcdir=DIR'
Look for the package's source code in directory DIR. Usually
`configure' can determine that directory automatically.
`configure' also accepts some other, not widely useful, options. Run
`configure --help' for more details.
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-generate_scorer-android-28-x86_64-opt.yml | build:
template_file: test-android-opt-base.tyml
dependencies:
- "android-x86_64-cpu-opt"
- "kenlm_android-x86_64-cpu-opt"
- "android-cache-x86_64-android-28"
cache:
url: ${system.android_cache.x86_64.android_28.url}
namespace: ${system.android_cache.x86_64.android_28.namespace}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-scorer-tests.sh ${system.kenlm.android_x86_64_cpu.url} android x86_64 android-28"
workerType: "${docker.dsTests}"
metadata:
name: "Testing DeepSpeech Android 9.0 x86-64 CPU generate scorer"
description: "Generate a DeepSpeech Scorer for Android 9.0/x86-64, CPU only, optimized version"
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/native_client/gen_workspace_status.sh | #!/bin/bash
set -x
tf_git_version=$(grep "STABLE_TF_GIT_VERSION" "bazel-out/stable-status.txt" | cut -d' ' -f2)
ds_version=$(grep "STABLE_DS_VERSION" "bazel-out/stable-status.txt" | cut -d' ' -f2)
ds_git_version=$(grep "STABLE_DS_GIT_VERSION" "bazel-out/stable-status.txt" | cut -d' ' -f2)
ds_graph_version=$(grep "STABLE_DS_GRAPH_VERSION" "bazel-out/stable-status.txt" | cut -d' ' -f2)
cat <<EOF
const char *tf_local_git_version() {
return "${tf_git_version}";
}
const char *ds_version() {
return "${ds_version}";
}
const char *ds_git_version() {
return "${ds_git_version}";
}
const int ds_graph_version() {
return ${ds_graph_version};
}
EOF
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-electronjs_v5.0-darwin-amd64-opt.yml | build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "darwin-amd64-cpu-opt"
- "test-training_16k-linux-amd64-py36m-opt"
- "homebrew_tests-darwin-amd64"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
system_setup:
>
${nodejs.brew.prep_12}
args:
tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 5.0.6 16k"
metadata:
name: "DeepSpeech OSX AMD64 CPU ElectronJS v5.0 tests"
description: "Testing DeepSpeech for OSX/AMD64 on ElectronJS v5.0, CPU only, optimized version"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/bin/fstprune.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
#include <fst/fst.h>
#include <fst/weight.h>
DEFINE_double(delta, fst::kDelta, "Comparison/quantization delta");
DEFINE_int64(nstate, fst::kNoStateId, "State number threshold");
DEFINE_string(weight, "", "Weight threshold");
int fstprune_main(int argc, char **argv);
int main(int argc, char **argv) { return fstprune_main(argc, argv); }
| 0 |
coqui_public_repos/TTS/docs | coqui_public_repos/TTS/docs/source/index.md |
```{include} ../../README.md
:relative-images:
```
----
# Documentation Content
```{eval-rst}
.. toctree::
:maxdepth: 2
:caption: Get started
tutorial_for_nervous_beginners
installation
faq
contributing
.. toctree::
:maxdepth: 2
:caption: Using 🐸TTS
inference
docker_images
implementing_a_new_model
implementing_a_new_language_frontend
training_a_model
finetuning
configuration
formatting_your_dataset
what_makes_a_good_dataset
tts_datasets
marytts
.. toctree::
:maxdepth: 2
:caption: Main Classes
main_classes/trainer_api
main_classes/audio_processor
main_classes/model_api
main_classes/dataset
main_classes/gan
main_classes/speaker_manager
.. toctree::
:maxdepth: 2
:caption: `tts` Models
models/glow_tts.md
models/vits.md
models/forward_tts.md
models/tacotron1-2.md
models/overflow.md
models/tortoise.md
models/bark.md
models/xtts.md
.. toctree::
:maxdepth: 2
:caption: `vocoder` Models
```
| 0 |
coqui_public_repos/STT/native_client/kenlm | coqui_public_repos/STT/native_client/kenlm/util/fake_ostream.hh | #ifndef UTIL_FAKE_OSTREAM_H
#define UTIL_FAKE_OSTREAM_H
#include "float_to_string.hh"
#include "integer_to_string.hh"
#include "string_piece.hh"
#include <cassert>
#include <limits>
#include <stdint.h>
namespace util {
/* Like std::ostream but without being incredibly slow.
* Supports most of the built-in types except for long double.
*
 * The FakeOStream class is intended to be inherited from. The inheriting class
* should provide:
* public:
* Derived &flush();
* Derived &write(const void *data, std::size_t length);
*
* private: or protected:
* friend class FakeOStream;
* char *Ensure(std::size_t amount);
* void AdvanceTo(char *to);
*
* The Ensure function makes enough space for an in-place write and returns
* where to write. The AdvanceTo function happens after the write, saying how
* much was actually written.
*
* Precondition:
* amount <= kToStringMaxBytes for in-place writes.
*/
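// A minimal sketch of a class satisfying this contract (an assumption for
// illustration, not part of KenLM): a derived stream that appends to a
// std::string buffer.
//
//   class StringOStream : public FakeOStream<StringOStream> {
//    public:
//     StringOStream &flush() { return *this; }
//     StringOStream &write(const void *data, std::size_t length) {
//       out_.append(static_cast<const char*>(data), length);
//       return *this;
//     }
//     const std::string &str() const { return out_; }
//    private:
//     friend class FakeOStream<StringOStream>;
//     char *Ensure(std::size_t amount) {
//       std::size_t old = out_.size();
//       out_.resize(old + amount);   // make room for an in-place write
//       return &out_[old];
//     }
//     void AdvanceTo(char *to) { out_.resize(to - out_.data()); }
//     std::string out_;
//   };
//
//   StringOStream s;
//   s << "count=" << 42 << '\n';  // uses the operators defined below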
template <class Derived> class FakeOStream {
public:
FakeOStream() {}
// This also covers std::string and char*
Derived &operator<<(StringPiece str) {
return C().write(str.data(), str.size());
}
// Handle integers by size and signedness.
private:
template <class Arg> struct EnableIfKludge {
typedef Derived type;
};
template <class From, unsigned Length = sizeof(From), bool Signed = std::numeric_limits<From>::is_signed, bool IsInteger = std::numeric_limits<From>::is_integer> struct Coerce {};
template <class From> struct Coerce<From, 2, false, true> { typedef uint16_t To; };
template <class From> struct Coerce<From, 4, false, true> { typedef uint32_t To; };
template <class From> struct Coerce<From, 8, false, true> { typedef uint64_t To; };
template <class From> struct Coerce<From, 2, true, true> { typedef int16_t To; };
template <class From> struct Coerce<From, 4, true, true> { typedef int32_t To; };
template <class From> struct Coerce<From, 8, true, true> { typedef int64_t To; };
public:
template <class From> typename EnableIfKludge<typename Coerce<From>::To>::type &operator<<(const From value) {
return CallToString(static_cast<typename Coerce<From>::To>(value));
}
// Character types that get copied as bytes instead of displayed as integers.
Derived &operator<<(char val) { return put(val); }
Derived &operator<<(signed char val) { return put(static_cast<char>(val)); }
Derived &operator<<(unsigned char val) { return put(static_cast<char>(val)); }
Derived &operator<<(bool val) { return put(val + '0'); }
// enums will fall back to int but are not caught by the template.
Derived &operator<<(int val) { return CallToString(static_cast<typename Coerce<int>::To>(val)); }
Derived &operator<<(float val) { return CallToString(val); }
Derived &operator<<(double val) { return CallToString(val); }
// This is here to catch all the other pointer types.
Derived &operator<<(const void *value) { return CallToString(value); }
// This is here because the above line also catches const char*.
Derived &operator<<(const char *value) { return *this << StringPiece(value); }
Derived &operator<<(char *value) { return *this << StringPiece(value); }
Derived &put(char val) {
char *c = C().Ensure(1);
*c = val;
C().AdvanceTo(++c);
return C();
}
char widen(char val) const { return val; }
private:
// References to derived class for convenience.
Derived &C() {
return *static_cast<Derived*>(this);
}
const Derived &C() const {
return *static_cast<const Derived*>(this);
}
// This is separate to prevent an infinite loop if the compiler considers
// types the same (i.e. gcc std::size_t and uint64_t or uint32_t).
template <class T> Derived &CallToString(const T value) {
C().AdvanceTo(ToString(value, C().Ensure(ToStringBuf<T>::kBytes)));
return C();
}
};
} // namespace
#endif // UTIL_FAKE_OSTREAM_H
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/map.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/map.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
FstClass *Map(const FstClass &ifst, MapType map_type, float delta, double power,
const WeightClass &weight) {
if (!ifst.WeightTypesMatch(weight, "Map")) return nullptr;
MapInnerArgs iargs(ifst, map_type, delta, power, weight);
MapArgs args(iargs);
Apply<Operation<MapArgs>>("Map", ifst.ArcType(), &args);
return args.retval;
}
REGISTER_FST_OPERATION(Map, StdArc, MapArgs);
REGISTER_FST_OPERATION(Map, LogArc, MapArgs);
REGISTER_FST_OPERATION(Map, Log64Arc, MapArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/compose.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to compute the composition of two FSTs.
#ifndef FST_COMPOSE_H_
#define FST_COMPOSE_H_
#include <algorithm>
#include <fst/log.h>
#include <fst/cache.h>
#include <fst/compose-filter.h>
#include <fst/fst-decl.h> // For optional argument declarations
#include <fst/lookahead-filter.h>
#include <fst/matcher.h>
#include <fst/state-table.h>
#include <fst/test-properties.h>
namespace fst {
// Delayed composition options templated on the arc type, the matcher,
// the composition filter, and the composition state table. By
// default, the matchers, filter, and state table are constructed by
// composition. If set below, the user can instead pass in these
// objects; in that case, ComposeFst takes their ownership. This
// version controls composition implemented between generic Fst<Arc>
// types and a shared matcher type M for Fst<Arc>. This should be
// adequate for most applications, giving a reasonable tradeoff
// between efficiency and code sharing (but see ComposeFstImplOptions).
template <class Arc, class M = Matcher<Fst<Arc>>,
class Filter = SequenceComposeFilter<M>,
class StateTable =
GenericComposeStateTable<Arc, typename Filter::FilterState>>
struct ComposeFstOptions : public CacheOptions {
M *matcher1; // FST1 matcher.
M *matcher2; // FST2 matcher.
Filter *filter; // Composition filter.
StateTable *state_table; // Composition state table.
explicit ComposeFstOptions(const CacheOptions &opts = CacheOptions(),
M *matcher1 = nullptr, M *matcher2 = nullptr,
Filter *filter = nullptr,
StateTable *state_table = nullptr)
: CacheOptions(opts),
matcher1(matcher1),
matcher2(matcher2),
filter(filter),
state_table(state_table) {}
};
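// A hedged usage sketch (not part of the original header): delayed composition
// of two StdArc FSTs with default options. At least one side must be sorted on
// the shared tape (here fst2 is assumed sorted on its input labels).
//
//   ComposeFstOptions<StdArc> opts;
//   ComposeFst<StdArc> composed(fst1, fst2, opts);  // states expand on demand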
// Forward declaration of ComposeFstMatcher.
template <class C, class F, class T>
class ComposeFstMatcher;
// Delayed composition options templated on the two matcher types, the
// composition filter, the composition state table and the cache store. By
// default, the matchers, filter, state table and cache store are constructed
// by composition. If set below, the user can instead pass in these objects; in
// that case, ComposeFst takes their ownership. This version controls
// composition implemented using arbitrary matchers (of the same arc type but
// otherwise arbitrary FST type). The user must ensure the matchers are
// compatible. These options permit the most efficient use, but share the
// least code. This is for advanced use only in the most demanding or
// specialized applications that can benefit from it (otherwise, prefer
// ComposeFstOptions).
template <class M1, class M2, class Filter = SequenceComposeFilter<M1, M2>,
class StateTable = GenericComposeStateTable<
typename M1::Arc, typename Filter::FilterState>,
class CacheStore = DefaultCacheStore<typename M1::Arc>>
struct ComposeFstImplOptions : public CacheImplOptions<CacheStore> {
M1 *matcher1; // FST1 matcher (see matcher.h).
M2 *matcher2; // FST2 matcher.
Filter *filter; // Composition filter (see compose-filter.h).
StateTable
*state_table; // Composition state table (see compose-state-table.h).
bool own_state_table; // ComposeFstImpl takes ownership of 'state_table'?
bool allow_noncommute; // Allow non-commutative weights
explicit ComposeFstImplOptions(const CacheOptions &opts,
M1 *matcher1 = nullptr, M2 *matcher2 = nullptr,
Filter *filter = nullptr,
StateTable *state_table = nullptr)
: CacheImplOptions<CacheStore>(opts),
matcher1(matcher1),
matcher2(matcher2),
filter(filter),
state_table(state_table),
own_state_table(true),
allow_noncommute(false) {}
explicit ComposeFstImplOptions(const CacheImplOptions<CacheStore> &opts,
M1 *matcher1 = nullptr, M2 *matcher2 = nullptr,
Filter *filter = nullptr,
StateTable *state_table = nullptr)
: CacheImplOptions<CacheStore>(opts),
matcher1(matcher1),
matcher2(matcher2),
filter(filter),
state_table(state_table),
own_state_table(true),
allow_noncommute(false) {}
ComposeFstImplOptions()
: matcher1(nullptr),
matcher2(nullptr),
filter(nullptr),
state_table(nullptr),
own_state_table(true),
allow_noncommute(false) {}
};
namespace internal {
// Implementation of delayed composition. This base class is common to the
// variants with different matchers, composition filters and state tables.
template <class Arc, class CacheStore = DefaultCacheStore<Arc>,
class F = ComposeFst<Arc, CacheStore>>
class ComposeFstImplBase
: public CacheBaseImpl<typename CacheStore::State, CacheStore> {
public:
using FST = F;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using State = typename CacheStore::State;
using CacheImpl = CacheBaseImpl<State, CacheStore>;
using FstImpl<Arc>::SetType;
using FstImpl<Arc>::SetProperties;
using FstImpl<Arc>::Properties;
using FstImpl<Arc>::SetInputSymbols;
using FstImpl<Arc>::SetOutputSymbols;
using CacheImpl::HasStart;
using CacheImpl::HasFinal;
using CacheImpl::HasArcs;
using CacheImpl::SetFinal;
using CacheImpl::SetStart;
ComposeFstImplBase(const CacheImplOptions<CacheStore> &opts)
: CacheImpl(opts) {}
ComposeFstImplBase(const CacheOptions &opts) : CacheImpl(opts) {}
ComposeFstImplBase(const ComposeFstImplBase &impl) : CacheImpl(impl, true) {
SetType(impl.Type());
SetProperties(impl.Properties(), kCopyProperties);
SetInputSymbols(impl.InputSymbols());
SetOutputSymbols(impl.OutputSymbols());
}
virtual ComposeFstImplBase *Copy() const = 0;
~ComposeFstImplBase() override {}
StateId Start() {
if (!HasStart()) {
const auto start = ComputeStart();
if (start != kNoStateId) SetStart(start);
}
return CacheImpl::Start();
}
Weight Final(StateId s) {
if (!HasFinal(s)) SetFinal(s, ComputeFinal(s));
return CacheImpl::Final(s);
}
virtual void Expand(StateId s) = 0;
size_t NumArcs(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl::NumArcs(s);
}
size_t NumInputEpsilons(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl::NumInputEpsilons(s);
}
size_t NumOutputEpsilons(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl::NumOutputEpsilons(s);
}
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) {
if (!HasArcs(s)) Expand(s);
CacheImpl::InitArcIterator(s, data);
}
virtual MatcherBase<Arc> *InitMatcher(const F &fst,
MatchType match_type) const {
// Use the default matcher if no override is provided.
return nullptr;
}
protected:
virtual StateId ComputeStart() = 0;
virtual Weight ComputeFinal(StateId s) = 0;
};
// Implementation of delayed composition templated on the matchers (see
// matcher.h), composition filter (see compose-filter.h) and the composition
// state table (see compose-state-table.h).
template <class CacheStore, class Filter, class StateTable>
class ComposeFstImpl
: public ComposeFstImplBase<typename CacheStore::Arc, CacheStore> {
public:
using Matcher1 = typename Filter::Matcher1;
using Matcher2 = typename Filter::Matcher2;
using FST1 = typename Matcher1::FST;
using FST2 = typename Matcher2::FST;
using Arc = typename CacheStore::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using FilterState = typename Filter::FilterState;
using State = typename CacheStore::State;
using CacheImpl = CacheBaseImpl<State, CacheStore>;
using StateTuple = typename StateTable::StateTuple;
friend class ComposeFstMatcher<CacheStore, Filter, StateTable>;
using FstImpl<Arc>::SetInputSymbols;
using FstImpl<Arc>::SetOutputSymbols;
using FstImpl<Arc>::SetType;
using FstImpl<Arc>::SetProperties;
template <class M1, class M2>
ComposeFstImpl(const FST1 &fst1, const FST2 &fst2,
const ComposeFstImplOptions<M1, M2, Filter, StateTable,
CacheStore> &opts);
ComposeFstImpl(const ComposeFstImpl &impl)
: ComposeFstImplBase<Arc, CacheStore>(impl),
filter_(new Filter(*impl.filter_, true)),
matcher1_(filter_->GetMatcher1()),
matcher2_(filter_->GetMatcher2()),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()),
state_table_(new StateTable(*impl.state_table_)),
own_state_table_(true),
match_type_(impl.match_type_) {}
~ComposeFstImpl() override {
if (own_state_table_) delete state_table_;
}
ComposeFstImpl *Copy() const override { return new ComposeFstImpl(*this); }
uint64_t Properties() const override { return Properties(kFstProperties); }
// Sets error if found, and returns other FST impl properties.
uint64_t Properties(uint64_t mask) const override {
if ((mask & kError) &&
(fst1_.Properties(kError, false) || fst2_.Properties(kError, false) ||
(matcher1_->Properties(0) & kError) ||
         (matcher2_->Properties(0) & kError) ||
(filter_->Properties(0) & kError) ||
state_table_->Error())) {
SetProperties(kError, kError);
}
return FstImpl<Arc>::Properties(mask);
}
// Arranges it so that the first arg to OrderedExpand is the Fst
// that will be matched on.
void Expand(StateId s) override {
const auto &tuple = state_table_->Tuple(s);
const auto s1 = tuple.StateId1();
const auto s2 = tuple.StateId2();
filter_->SetState(s1, s2, tuple.GetFilterState());
if (MatchInput(s1, s2)) {
OrderedExpand(s, fst2_, s2, fst1_, s1, matcher2_, true);
} else {
OrderedExpand(s, fst1_, s1, fst2_, s2, matcher1_, false);
}
}
const FST1 &GetFst1() const { return fst1_; }
const FST2 &GetFst2() const { return fst2_; }
const Matcher1 *GetMatcher1() const { return matcher1_; }
Matcher1 *GetMatcher1() { return matcher1_; }
const Matcher2 *GetMatcher2() const { return matcher2_; }
Matcher2 *GetMatcher2() { return matcher2_; }
const Filter *GetFilter() const { return filter_.get(); }
Filter *GetFilter() { return filter_.get(); }
const StateTable *GetStateTable() const { return state_table_; }
StateTable *GetStateTable() { return state_table_; }
MatcherBase<Arc> *InitMatcher(const ComposeFst<Arc, CacheStore> &fst,
MatchType match_type) const override {
const auto test_props = match_type == MATCH_INPUT
? kFstProperties & ~kILabelInvariantProperties
: kFstProperties & ~kOLabelInvariantProperties;
// If both matchers support 'match_type' and we have a guarantee that a
// call to 'filter_->FilterArc(arc1, arc2)' will not modify the ilabel of
    // arc1 when MATCH_INPUT or the olabel of arc2 when MATCH_OUTPUT, then
// ComposeFstMatcher can be used.
if ((matcher1_->Type(false) == match_type) &&
(matcher2_->Type(false) == match_type) &&
(filter_->Properties(test_props) == test_props)) {
return new ComposeFstMatcher<
CacheStore, Filter, StateTable>(&fst, match_type);
}
return nullptr;
}
private:
  // This does the actual matching of labels in the composition. The
// arguments are ordered so matching is called on state 'sa' of
// 'fsta' for each arc leaving state 'sb' of 'fstb'. The 'match_input' arg
// determines whether the input or output label of arcs at 'sb' is
// the one to match on.
template <class FST, class Matcher>
void OrderedExpand(StateId s, const Fst<Arc> &, StateId sa, const FST &fstb,
StateId sb, Matcher *matchera, bool match_input) {
matchera->SetState(sa);
// First processes non-consuming symbols (e.g., epsilons) on FSTA.
const Arc loop(match_input ? 0 : kNoLabel, match_input ? kNoLabel : 0,
Weight::One(), sb);
MatchArc(s, matchera, loop, match_input);
// Then processes matches on FSTB.
for (ArcIterator<FST> iterb(fstb, sb); !iterb.Done(); iterb.Next()) {
MatchArc(s, matchera, iterb.Value(), match_input);
}
CacheImpl::SetArcs(s);
}
  // Matches a single transition from 'fstb' against 'fsta' at 's'.
template <class Matcher>
void MatchArc(StateId s, Matcher *matchera, const Arc &arc,
bool match_input) {
if (matchera->Find(match_input ? arc.olabel : arc.ilabel)) {
for (; !matchera->Done(); matchera->Next()) {
auto arca = matchera->Value();
auto arcb = arc;
if (match_input) {
const auto &fs = filter_->FilterArc(&arcb, &arca);
if (fs != FilterState::NoState()) AddArc(s, arcb, arca, fs);
} else {
const auto &fs = filter_->FilterArc(&arca, &arcb);
if (fs != FilterState::NoState()) AddArc(s, arca, arcb, fs);
}
}
}
}
// Add a matching transition at 's'.
void AddArc(StateId s, const Arc &arc1, const Arc &arc2,
const FilterState &f) {
const StateTuple tuple(arc1.nextstate, arc2.nextstate, f);
const Arc oarc(arc1.ilabel, arc2.olabel, Times(arc1.weight, arc2.weight),
state_table_->FindState(tuple));
CacheImpl::PushArc(s, oarc);
}
StateId ComputeStart() override {
const auto s1 = fst1_.Start();
if (s1 == kNoStateId) return kNoStateId;
const auto s2 = fst2_.Start();
if (s2 == kNoStateId) return kNoStateId;
const auto &fs = filter_->Start();
const StateTuple tuple(s1, s2, fs);
return state_table_->FindState(tuple);
}
Weight ComputeFinal(StateId s) override {
const auto &tuple = state_table_->Tuple(s);
const auto s1 = tuple.StateId1();
auto final1 = matcher1_->Final(s1);
if (final1 == Weight::Zero()) return final1;
const auto s2 = tuple.StateId2();
auto final2 = matcher2_->Final(s2);
if (final2 == Weight::Zero()) return final2;
filter_->SetState(s1, s2, tuple.GetFilterState());
filter_->FilterFinal(&final1, &final2);
return Times(final1, final2);
}
// Determines which side to match on per composition state.
bool MatchInput(StateId s1, StateId s2) {
switch (match_type_) {
case MATCH_INPUT:
return true;
case MATCH_OUTPUT:
return false;
default: // MATCH_BOTH
const auto priority1 = matcher1_->Priority(s1);
const auto priority2 = matcher2_->Priority(s2);
if (priority1 == kRequirePriority && priority2 == kRequirePriority) {
FSTERROR() << "ComposeFst: Both sides can't require match";
SetProperties(kError, kError);
return true;
}
if (priority1 == kRequirePriority) return false;
if (priority2 == kRequirePriority) {
return true;
}
return priority1 <= priority2;
}
}
// Identifies and verifies the capabilities of the matcher to be used for
// composition.
void SetMatchType();
std::unique_ptr<Filter> filter_;
Matcher1 *matcher1_; // Borrowed reference.
Matcher2 *matcher2_; // Borrowed reference.
const FST1 &fst1_;
const FST2 &fst2_;
StateTable *state_table_;
bool own_state_table_;
MatchType match_type_;
};
template <class CacheStore, class Filter, class StateTable>
template <class M1, class M2>
ComposeFstImpl<CacheStore, Filter, StateTable>::ComposeFstImpl(
const FST1 &fst1, const FST2 &fst2,
const ComposeFstImplOptions<M1, M2, Filter, StateTable, CacheStore> &opts)
: ComposeFstImplBase<Arc, CacheStore>(opts),
filter_(opts.filter
? opts.filter
: new Filter(fst1, fst2, opts.matcher1, opts.matcher2)),
matcher1_(filter_->GetMatcher1()),
matcher2_(filter_->GetMatcher2()),
fst1_(matcher1_->GetFst()),
fst2_(matcher2_->GetFst()),
state_table_(opts.state_table ? opts.state_table
: new StateTable(fst1_, fst2_)),
own_state_table_(opts.state_table ? opts.own_state_table : true) {
SetType("compose");
if (!CompatSymbols(fst2.InputSymbols(), fst1.OutputSymbols())) {
FSTERROR() << "ComposeFst: Output symbol table of 1st argument "
<< "does not match input symbol table of 2nd argument";
SetProperties(kError, kError);
}
SetInputSymbols(fst1_.InputSymbols());
SetOutputSymbols(fst2_.OutputSymbols());
SetMatchType();
VLOG(2) << "ComposeFstImpl: Match type: " << match_type_;
if (match_type_ == MATCH_NONE) SetProperties(kError, kError);
const auto fprops1 = fst1.Properties(kFstProperties, false);
const auto fprops2 = fst2.Properties(kFstProperties, false);
const auto mprops1 = matcher1_->Properties(fprops1);
const auto mprops2 = matcher2_->Properties(fprops2);
const auto cprops = ComposeProperties(mprops1, mprops2);
SetProperties(filter_->Properties(cprops), kCopyProperties);
if (state_table_->Error()) SetProperties(kError, kError);
}
template <class CacheStore, class Filter, class StateTable>
void ComposeFstImpl<CacheStore, Filter, StateTable>::SetMatchType() {
// Ensures any required matching is possible and known.
if ((matcher1_->Flags() & kRequireMatch) &&
matcher1_->Type(true) != MATCH_OUTPUT) {
FSTERROR() << "ComposeFst: 1st argument cannot perform required matching "
<< "(sort?).";
match_type_ = MATCH_NONE;
return;
}
if ((matcher2_->Flags() & kRequireMatch) &&
matcher2_->Type(true) != MATCH_INPUT) {
FSTERROR() << "ComposeFst: 2nd argument cannot perform required matching "
<< "(sort?).";
match_type_ = MATCH_NONE;
return;
}
// Finds which sides to match on (favoring minimal testing of capabilities).
const auto type1 = matcher1_->Type(false);
const auto type2 = matcher2_->Type(false);
if (type1 == MATCH_OUTPUT && type2 == MATCH_INPUT) {
match_type_ = MATCH_BOTH;
} else if (type1 == MATCH_OUTPUT) {
match_type_ = MATCH_OUTPUT;
} else if (type2 == MATCH_INPUT) {
match_type_ = MATCH_INPUT;
} else if (matcher1_->Type(true) == MATCH_OUTPUT) {
match_type_ = MATCH_OUTPUT;
} else if (matcher2_->Type(true) == MATCH_INPUT) {
match_type_ = MATCH_INPUT;
} else {
FSTERROR() << "ComposeFst: 1st argument cannot match on output labels "
<< "and 2nd argument cannot match on input labels (sort?).";
match_type_ = MATCH_NONE;
}
}
} // namespace internal
// Computes the composition of two transducers. This version is a delayed FST.
// If FST1 transduces string x to y with weight a and FST2 transduces y to z
// with weight b, then their composition transduces string x to z with weight
// Times(a, b).
//
// The output labels of the first transducer or the input labels of the second
// transducer must be sorted (with the default matcher). The weights need to
// form a commutative semiring (valid for TropicalWeight and LogWeight).
//
// Complexity:
//
// Assuming the first FST is unsorted and the second is sorted,
//
// Time: O(v1 v2 d1 (log d2 + m2)),
// Space: O(v1 v2)
//
// where vi = # of states visited, di = maximum out-degree, and mi the
// maximum multiplicity of the states visited, for the ith FST. Constant time
// and space to visit an input state or arc is assumed and exclusive of caching.
//
// Caveats:
// - ComposeFst does not trim its output (since it is a delayed operation).
// - The efficiency of composition can be strongly affected by several factors:
// - the choice of which transducer is sorted - prefer sorting the FST
// that has the greater average out-degree.
// - the amount of non-determinism
// - the presence and location of epsilon transitions - avoid epsilon
// transitions on the output side of the first transducer or
// the input side of the second transducer or prefer placing
// them later in a path since they delay matching and can
// introduce non-coaccessible states and transitions.
//
// This class attaches interface to implementation and handles reference
// counting, delegating most methods to ImplToFst. The CacheStore specifies the
// cache store (default declared in fst-decl.h).
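//
// Example (a sketch; assumes fst1's output labels or fst2's input labels
// have been sorted, e.g., with ArcSort):
//
//   StdVectorFst fst1, fst2;
//   // ... populate fst1 and fst2 ...
//   ComposeFst<StdArc> cfst(fst1, fst2);
//   // States and arcs of cfst are computed on demand and cached:
//   for (StateIterator<ComposeFst<StdArc>> siter(cfst); !siter.Done();
//        siter.Next()) {
//     // ... visit siter.Value() ...
//   }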
template <class A, class CacheStore /* = DefaultCacheStore<A> */>
class ComposeFst
: public ImplToFst<internal::ComposeFstImplBase<A, CacheStore>> {
public:
using Arc = A;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using Store = CacheStore;
using State = typename CacheStore::State;
using Impl = internal::ComposeFstImplBase<A, CacheStore>;
friend class ArcIterator<ComposeFst<Arc, CacheStore>>;
friend class StateIterator<ComposeFst<Arc, CacheStore>>;
template <class, class, class> friend class ComposeFstMatcher;
// Compose specifying only caching options.
ComposeFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
const CacheOptions &opts = CacheOptions())
: ImplToFst<Impl>(CreateBase(fst1, fst2, opts)) {}
// Compose specifying one shared matcher type M. Requires that the input FSTs
// and matcher FST types be Fst<Arc>. Recommended for best code-sharing and
  // matcher compatibility.
template <class Matcher, class Filter, class StateTuple>
ComposeFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
const ComposeFstOptions<Arc, Matcher, Filter, StateTuple> &opts)
: ImplToFst<Impl>(CreateBase1(fst1, fst2, opts)) {}
  // Compose specifying two matcher types Matcher1 and Matcher2. Requires that
  // the input FSTs (of the same Arc type, but otherwise arbitrary) match the
  // corresponding matcher FST types. Recommended only for advanced use in
  // demanding or specialized applications due to potential code bloat and
  // matcher incompatibilities.
template <class Matcher1, class Matcher2, class Filter, class StateTuple>
ComposeFst(const typename Matcher1::FST &fst1,
const typename Matcher2::FST &fst2,
const ComposeFstImplOptions<Matcher1, Matcher2, Filter, StateTuple,
CacheStore> &opts)
: ImplToFst<Impl>(CreateBase2(fst1, fst2, opts)) {}
// See Fst<>::Copy() for doc.
ComposeFst(const ComposeFst<A, CacheStore> &fst, bool safe = false)
: ImplToFst<Impl>(safe ? std::shared_ptr<Impl>(fst.GetImpl()->Copy())
: fst.GetSharedImpl()) {}
// Get a copy of this ComposeFst. See Fst<>::Copy() for further doc.
ComposeFst<A, CacheStore> *Copy(bool safe = false) const override {
return new ComposeFst<A, CacheStore>(*this, safe);
}
inline void InitStateIterator(StateIteratorData<Arc> *data) const override;
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
GetMutableImpl()->InitArcIterator(s, data);
}
MatcherBase<Arc> *InitMatcher(MatchType match_type) const override {
return GetImpl()->InitMatcher(*this, match_type);
}
protected:
using ImplToFst<Impl>::GetImpl;
using ImplToFst<Impl>::GetMutableImpl;
explicit ComposeFst(std::shared_ptr<Impl> impl) : ImplToFst<Impl>(impl) {}
// Create compose implementation specifying two matcher types.
template <class Matcher1, class Matcher2, class Filter, class StateTuple>
static std::shared_ptr<Impl> CreateBase2(
const typename Matcher1::FST &fst1, const typename Matcher2::FST &fst2,
const ComposeFstImplOptions<Matcher1, Matcher2, Filter, StateTuple,
CacheStore> &opts) {
auto impl = std::make_shared<
internal::ComposeFstImpl<CacheStore, Filter, StateTuple>>(fst1, fst2,
opts);
if (!(Weight::Properties() & kCommutative) && !opts.allow_noncommute) {
const auto props1 = fst1.Properties(kUnweighted, true);
const auto props2 = fst2.Properties(kUnweighted, true);
if (!(props1 & kUnweighted) && !(props2 & kUnweighted)) {
FSTERROR() << "ComposeFst: Weights must be a commutative semiring: "
<< Weight::Type();
impl->SetProperties(kError, kError);
}
}
return impl;
}
// Create compose implementation specifying one matcher type; requires that
// input and matcher FST types be Fst<Arc>.
template <class Matcher, class Filter, class StateTuple>
static std::shared_ptr<Impl> CreateBase1(
const Fst<Arc> &fst1, const Fst<Arc> &fst2,
const ComposeFstOptions<Arc, Matcher, Filter, StateTuple> &opts) {
ComposeFstImplOptions<Matcher, Matcher, Filter, StateTuple, CacheStore>
nopts(opts, opts.matcher1, opts.matcher2, opts.filter,
opts.state_table);
return CreateBase2(fst1, fst2, nopts);
}
// Create compose implementation specifying no matcher type.
static std::shared_ptr<Impl> CreateBase(const Fst<Arc> &fst1,
const Fst<Arc> &fst2,
const CacheOptions &opts) {
switch (LookAheadMatchType(fst1, fst2)) { // Check for lookahead matchers
default:
case MATCH_NONE: { // Default composition (no look-ahead).
ComposeFstOptions<Arc> nopts(opts);
return CreateBase1(fst1, fst2, nopts);
}
case MATCH_OUTPUT: { // Lookahead on fst1.
using M = typename DefaultLookAhead<Arc, MATCH_OUTPUT>::FstMatcher;
using F = typename DefaultLookAhead<Arc, MATCH_OUTPUT>::ComposeFilter;
ComposeFstOptions<Arc, M, F> nopts(opts);
return CreateBase1(fst1, fst2, nopts);
}
case MATCH_INPUT: { // Lookahead on fst2
using M = typename DefaultLookAhead<Arc, MATCH_INPUT>::FstMatcher;
using F = typename DefaultLookAhead<Arc, MATCH_INPUT>::ComposeFilter;
ComposeFstOptions<Arc, M, F> nopts(opts);
return CreateBase1(fst1, fst2, nopts);
}
}
}
private:
ComposeFst &operator=(const ComposeFst &fst) = delete;
};
// Specialization for ComposeFst.
template <class Arc, class CacheStore>
class StateIterator<ComposeFst<Arc, CacheStore>>
: public CacheStateIterator<ComposeFst<Arc, CacheStore>> {
public:
explicit StateIterator(const ComposeFst<Arc, CacheStore> &fst)
: CacheStateIterator<ComposeFst<Arc, CacheStore>>(fst,
fst.GetMutableImpl()) {}
};
// Specialization for ComposeFst.
template <class Arc, class CacheStore>
class ArcIterator<ComposeFst<Arc, CacheStore>>
: public CacheArcIterator<ComposeFst<Arc, CacheStore>> {
public:
using StateId = typename Arc::StateId;
ArcIterator(const ComposeFst<Arc, CacheStore> &fst, StateId s)
: CacheArcIterator<ComposeFst<Arc, CacheStore>>(fst.GetMutableImpl(), s) {
if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s);
}
};
template <class Arc, class CacheStore>
inline void ComposeFst<Arc, CacheStore>::InitStateIterator(
StateIteratorData<Arc> *data) const {
data->base = new StateIterator<ComposeFst<Arc, CacheStore>>(*this);
}
// Specialized matcher for ComposeFst. Supports MATCH_INPUT or MATCH_OUTPUT,
// iff the underlying matchers for the two FSTs being composed support
// MATCH_INPUT or MATCH_OUTPUT, respectively.
template <class CacheStore, class Filter, class StateTable>
class ComposeFstMatcher : public MatcherBase<typename CacheStore::Arc> {
public:
using Arc = typename CacheStore::Arc;
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using Matcher1 = typename Filter::Matcher1;
using Matcher2 = typename Filter::Matcher2;
using FilterState = typename Filter::FilterState;
using StateTuple = typename StateTable::StateTuple;
using Impl = internal::ComposeFstImpl<CacheStore, Filter, StateTable>;
// The compose FST arg must match the filter and state table types.
// This makes a copy of the FST.
ComposeFstMatcher(const ComposeFst<Arc, CacheStore> &fst,
MatchType match_type)
: owned_fst_(fst.Copy()),
fst_(*owned_fst_),
impl_(static_cast<const Impl *>(fst_.GetImpl())),
s_(kNoStateId),
match_type_(match_type),
matcher1_(impl_->matcher1_->Copy()),
matcher2_(impl_->matcher2_->Copy()),
current_loop_(false),
loop_(kNoLabel, 0, Weight::One(), kNoStateId) {
if (match_type_ == MATCH_OUTPUT) std::swap(loop_.ilabel, loop_.olabel);
}
// The compose FST arg must match the filter and state table types.
// This doesn't copy the FST (although it may copy components).
ComposeFstMatcher(const ComposeFst<Arc, CacheStore> *fst,
MatchType match_type)
: fst_(*fst),
impl_(static_cast<const Impl *>(fst_.GetImpl())),
s_(kNoStateId),
match_type_(match_type),
matcher1_(impl_->matcher1_->Copy()),
matcher2_(impl_->matcher2_->Copy()),
current_loop_(false),
loop_(kNoLabel, 0, Weight::One(), kNoStateId) {
if (match_type_ == MATCH_OUTPUT) std::swap(loop_.ilabel, loop_.olabel);
}
// This makes a copy of the FST.
ComposeFstMatcher(
const ComposeFstMatcher<CacheStore, Filter, StateTable> &matcher,
bool safe = false)
: owned_fst_(matcher.fst_.Copy(safe)),
fst_(*owned_fst_),
impl_(static_cast<const Impl *>(fst_.GetImpl())),
s_(kNoStateId),
match_type_(matcher.match_type_),
matcher1_(matcher.matcher1_->Copy(safe)),
matcher2_(matcher.matcher2_->Copy(safe)),
current_loop_(false),
loop_(kNoLabel, 0, Weight::One(), kNoStateId) {
if (match_type_ == MATCH_OUTPUT) std::swap(loop_.ilabel, loop_.olabel);
}
ComposeFstMatcher<CacheStore, Filter, StateTable> *Copy(
bool safe = false) const override {
return new ComposeFstMatcher<CacheStore, Filter, StateTable>(*this, safe);
}
MatchType Type(bool test) const override {
if ((matcher1_->Type(test) == MATCH_NONE) ||
(matcher2_->Type(test) == MATCH_NONE)) {
return MATCH_NONE;
}
if (((matcher1_->Type(test) == MATCH_UNKNOWN) &&
(matcher2_->Type(test) == MATCH_UNKNOWN)) ||
((matcher1_->Type(test) == MATCH_UNKNOWN) &&
(matcher2_->Type(test) == match_type_)) ||
((matcher1_->Type(test) == match_type_) &&
(matcher2_->Type(test) == MATCH_UNKNOWN))) {
return MATCH_UNKNOWN;
}
if ((matcher1_->Type(test) == match_type_) &&
(matcher2_->Type(test) == match_type_)) {
return match_type_;
}
return MATCH_NONE;
}
const Fst<Arc> &GetFst() const override { return fst_; }
uint64_t Properties(uint64_t inprops) const override {
return inprops;
}
void SetState(StateId s) final {
if (s_ == s) return;
s_ = s;
const auto &tuple = impl_->state_table_->Tuple(s);
matcher1_->SetState(tuple.StateId1());
matcher2_->SetState(tuple.StateId2());
loop_.nextstate = s_;
}
bool Find(Label label) final {
bool found = false;
current_loop_ = false;
if (label == 0) {
current_loop_ = true;
found = true;
}
if (match_type_ == MATCH_INPUT) {
found = found || FindLabel(label, matcher1_.get(), matcher2_.get());
} else { // match_type_ == MATCH_OUTPUT
found = found || FindLabel(label, matcher2_.get(), matcher1_.get());
}
return found;
}
bool Done() const final {
return !current_loop_ && matcher1_->Done() && matcher2_->Done();
}
const Arc &Value() const final { return current_loop_ ? loop_ : arc_; }
void Next() final {
if (current_loop_) {
current_loop_ = false;
} else if (match_type_ == MATCH_INPUT) {
FindNext(matcher1_.get(), matcher2_.get());
} else { // match_type_ == MATCH_OUTPUT
FindNext(matcher2_.get(), matcher1_.get());
}
}
std::ptrdiff_t Priority(StateId s) final { return fst_.NumArcs(s); }
private:
// Processes a match with the filter and creates resulting arc.
bool MatchArc(StateId s, Arc arc1,
Arc arc2) { // FIXME(kbg): copy but not assignment.
const auto &fs = impl_->filter_->FilterArc(&arc1, &arc2);
if (fs == FilterState::NoState()) return false;
const StateTuple tuple(arc1.nextstate, arc2.nextstate, fs);
arc_.ilabel = arc1.ilabel;
arc_.olabel = arc2.olabel;
arc_.weight = Times(arc1.weight, arc2.weight);
arc_.nextstate = impl_->state_table_->FindState(tuple);
return true;
}
// Finds the first match allowed by the filter.
template <class MatcherA, class MatcherB>
bool FindLabel(Label label, MatcherA *matchera, MatcherB *matcherb) {
if (matchera->Find(label)) {
matcherb->Find(match_type_ == MATCH_INPUT ? matchera->Value().olabel
: matchera->Value().ilabel);
return FindNext(matchera, matcherb);
}
return false;
}
// Finds the next match allowed by the filter, returning true iff such a
// match is found.
template <class MatcherA, class MatcherB>
bool FindNext(MatcherA *matchera, MatcherB *matcherb) {
// State when entering this function:
    // 'matchera' is pointing to a match x, y for label x, and a match for y
    // was requested on 'matcherb'.
while (!matchera->Done() || !matcherb->Done()) {
if (matcherb->Done()) {
// If no more matches for y on 'matcherb', moves forward on 'matchera'
// until a match x, y' is found such that there is a match for y' on
// 'matcherb'.
matchera->Next();
while (!matchera->Done() &&
!matcherb->Find(match_type_ == MATCH_INPUT
? matchera->Value().olabel
: matchera->Value().ilabel)) {
matchera->Next();
}
}
while (!matcherb->Done()) {
// 'matchera' is pointing to a match x, y' ('arca') and 'matcherb' is
// pointing to a match y', z' ('arcb'). If combining these two arcs is
// allowed by the filter (hence resulting in an arc x, z') return true.
// Position 'matcherb' on the next potential match for y' before
// returning.
const auto &arca = matchera->Value();
const auto &arcb = matcherb->Value();
// Position 'matcherb' on the next potential match for y'.
matcherb->Next();
        // Returns true if combining these two arcs is allowed by the filter
// (hence resulting in an arc x, z'); otherwise consider next match
// for y' on 'matcherb'.
if (MatchArc(s_, match_type_ == MATCH_INPUT ? arca : arcb,
match_type_ == MATCH_INPUT ? arcb : arca)) {
return true;
}
}
}
    // Both 'matchera' and 'matcherb' are done; no more matches to analyze.
return false;
}
std::unique_ptr<const ComposeFst<Arc, CacheStore>> owned_fst_;
const ComposeFst<Arc, CacheStore> &fst_;
const Impl *impl_;
StateId s_;
MatchType match_type_;
std::unique_ptr<Matcher1> matcher1_;
std::unique_ptr<Matcher2> matcher2_;
bool current_loop_;
Arc loop_;
Arc arc_;
};
// Useful alias when using StdArc.
using StdComposeFst = ComposeFst<StdArc>;
enum ComposeFilter {
AUTO_FILTER,
NULL_FILTER,
TRIVIAL_FILTER,
SEQUENCE_FILTER,
ALT_SEQUENCE_FILTER,
MATCH_FILTER
};
struct ComposeOptions {
bool connect; // Connect output?
ComposeFilter filter_type; // Pre-defined filter to use.
explicit ComposeOptions(bool connect = true,
ComposeFilter filter_type = AUTO_FILTER)
: connect(connect), filter_type(filter_type) {}
};
// Computes the composition of two transducers. This version writes
// the composed FST into a MutableFst. If FST1 transduces string x to
// y with weight a and FST2 transduces y to z with weight b, then
// their composition transduces string x to z with weight
// Times(a, b).
//
// The output labels of the first transducer or the input labels of
// the second transducer must be sorted. The weights need to form a
// commutative semiring (valid for TropicalWeight and LogWeight).
//
// Complexity:
//
// Assuming the first FST is unsorted and the second is sorted:
//
// Time: O(V1 V2 D1 (log D2 + M2)),
// Space: O(V1 V2 D1 M2)
//
// where Vi = # of states, Di = maximum out-degree, and Mi is the maximum
// multiplicity, for the ith FST.
//
// Caveats:
//
// - Compose trims its output.
// - The efficiency of composition can be strongly affected by several factors:
// - the choice of which transducer is sorted - prefer sorting the FST
// that has the greater average out-degree.
// - the amount of non-determinism
// - the presence and location of epsilon transitions - avoid epsilon
// transitions on the output side of the first transducer or
// the input side of the second transducer or prefer placing
// them later in a path since they delay matching and can
// introduce non-coaccessible states and transitions.
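//
// Example (a sketch; label sorting as described above):
//
//   StdVectorFst fst1, fst2, result;
//   // ... populate fst1 and fst2 ...
//   Compose(fst1, fst2, &result, ComposeOptions(true, AUTO_FILTER));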
template <class Arc>
void Compose(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2,
MutableFst<Arc> *ofst,
const ComposeOptions &opts = ComposeOptions()) {
using M = Matcher<Fst<Arc>>;
// In each case, we cache only the last state for fastest copy.
switch (opts.filter_type) {
case AUTO_FILTER: {
CacheOptions nopts;
nopts.gc_limit = 0;
*ofst = ComposeFst<Arc>(ifst1, ifst2, nopts);
break;
}
case NULL_FILTER: {
ComposeFstOptions<Arc, M, NullComposeFilter<M>> copts;
copts.gc_limit = 0;
*ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
break;
}
case SEQUENCE_FILTER: {
ComposeFstOptions<Arc, M, SequenceComposeFilter<M>> copts;
copts.gc_limit = 0;
*ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
break;
}
case ALT_SEQUENCE_FILTER: {
ComposeFstOptions<Arc, M, AltSequenceComposeFilter<M>> copts;
copts.gc_limit = 0;
*ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
break;
}
case MATCH_FILTER: {
ComposeFstOptions<Arc, M, MatchComposeFilter<M>> copts;
copts.gc_limit = 0;
*ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
break;
}
case TRIVIAL_FILTER: {
ComposeFstOptions<Arc, M, TrivialComposeFilter<M>> copts;
copts.gc_limit = 0;
*ofst = ComposeFst<Arc>(ifst1, ifst2, copts);
break;
}
}
if (opts.connect) Connect(ofst);
}
} // namespace fst
#endif // FST_COMPOSE_H_
| 0 |
coqui_public_repos/coqui-py/docs | coqui_public_repos/coqui-py/docs/source/api.rst | API documentation
=================
.. python-apigen-group:: api
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/README | OpenFst: Release 1.6.7.
OpenFst is a library for constructing, combining, optimizing, and searching
weighted finite-state transducers (FSTs).
REQUIREMENTS:
This version is known to work under Linux using g++ (>= 4.7) and OS X using
XCode (>= 5). It is expected to work wherever adequate POSIX (dlopen,
ssize_t, basename), C99 (snprintf, strtoll, <stdint.h>), and C++11
(<unordered_set>, <unordered_map>, <forward_list>) support is available.
INSTALLATION:
Follow the generic GNU build system instructions in ./INSTALL. We
recommend configuring with --enable-static=no for faster compiles.
Optional features:
--enable-bin Enable fst::script and executables (def: yes)
--enable-compact-fsts Enable CompactFst extensions (def: no)
--enable-compress Enable compression extension (def: no)
--enable-const-fsts Enable ConstFst extensions (def: no)
--enable-far Enable FAR extensions (def: no)
--enable-grm Enable all dependencies of OpenGrm (def: no)
--enable-linear-fsts Enable LinearTagger/ClassifierFst extensions (def: no)
--enable-lookahead-fsts Enable LookAheadFst extensions (def: no)
--enable-mpdt Enable MPDT extensions (def: no)
--enable-ngram-fsts Enable NGramFst extensions (def: no)
--enable-pdt Enable PDT extensions (def: no)
--enable-python Enable Python extension (def: no)
--enable-special Enable special-matcher extensions (def: no)
Configuring with --enable-bin=no gives very fast compiles, but excludes the
command line utilities.
Configuring with --enable-python will attempt to install the Python module to
whichever site-packages (or dist-packages, on Debian or Ubuntu) is found
during configuration.
The flag --with-libfstdir specifies where FST extensions should be installed;
it defaults to ${libdir}/fst.
Compiling with -Wall -Wno-sign-compare under g++ should give no warnings from
this library.
If you encounter an error about loading shared objects when attempting to use
the library immediately after installation, (e.g, `...cannot open shared
object file...`) you may need to refresh your system's shared object cache.
On Linux, this is accomplished by invoking ldconfig; the corresponding command
on OS X is called update_dyld_shared_cache. Both of these require superuser
privileges (and so should be executed with sudo).
USAGE:
Assuming you've installed under the default /usr/local, the FST binaries are
found on /usr/local/bin.
To use in your own program, include <fst/fstlib.h> and compile with
-I/usr/local/include. The compiler must support C++11 (for g++ add the flag
-std=c++11). Link against /usr/local/lib/libfst.so and -ldl. Set your
LD_LIBRARY_PATH (or equivalent) to contain /usr/local/lib. The linking is,
by default, dynamic so that the Fst and Arc type DSO extensions can be used
correctly if desired. Any extensions will be found under /usr/local/lib/fst
or /usr/local/include/fst/extensions.
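For example, a minimal program along these lines (a sketch; error handling
omitted):
  // example.cc
  #include <fst/fstlib.h>
  int main() {
    fst::StdVectorFst fst;
    fst.AddState();
    fst.SetStart(0);
    fst.SetFinal(0, fst::TropicalWeight::One());
    fst.Write("example.fst");
    return 0;
  }
can be compiled and linked with:
  g++ -std=c++11 -I/usr/local/include example.cc \
      -L/usr/local/lib -lfst -ldl -o example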
DOCUMENTATION:
See www.openfst.org for general documentation.
See ./NEWS for updates since the last release.
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/ci_scripts/notebook-tests.sh | #!/bin/bash
set -xe
source $(dirname "$0")/all-vars.sh
source $(dirname "$0")/all-utils.sh
time jupyter nbconvert --to notebook --execute notebooks/easy_transfer_learning.ipynb
time jupyter nbconvert --to notebook --execute notebooks/train_your_first_coqui_STT_model.ipynb
time jupyter nbconvert --to notebook --execute notebooks/train_with_common_voice.ipynb
| 0 |
coqui_public_repos/STT-models/welsh/techiaith | coqui_public_repos/STT-models/welsh/techiaith/v21.03/alphabet.txt |
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
r
s
t
u
v
w
y
z
á
â
ä
é
ê
ë
î
ï
ô
ö
ô
û
ŵ
ŷ
'
.
!
?
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/ngram/bitmap-index.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/extensions/ngram/bitmap-index.h>
#include <algorithm>
#include <iterator>
#include <fst/log.h>
#include <fst/extensions/ngram/nthbit.h>
namespace fst {
namespace {
const size_t kPrimaryBlockBits =
BitmapIndex::kStorageBitSize * BitmapIndex::kSecondaryBlockSize;
// If [begin, begin+size) is a monotonically increasing running sum of
// popcounts for a bitmap, this will return the index of the word that contains
// the value'th zero. If value is larger than the number of zeros in the
// bitmap, size will be returned. The idea is that the zero count (i.e., the
// popcount of the logical NOT of the values) is offset * kStorageBitSize
// minus the running-sum value at each element.
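// For example, with BlockSize = 64 and running sums {40, 100, 130}, the
// implied zero counts per prefix are {24, 28, 62}, so a search for the 30th
// zero returns the third word (index 2).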
template <size_t BlockSize, typename Container>
size_t InvertedSearch(const Container& c,
size_t first_idx,
size_t last_idx,
size_t value) {
const size_t begin_idx = first_idx;
while (first_idx != last_idx) {
// Invariant: [first_idx, last_idx) is the search range.
size_t mid_idx = first_idx + ((last_idx - first_idx) / 2);
size_t mid_value = BlockSize * (1 + (mid_idx - begin_idx)) - c[mid_idx];
if (mid_value < value) {
first_idx = mid_idx + 1;
} else {
last_idx = mid_idx;
}
}
return first_idx;
}
} // namespace
size_t BitmapIndex::Rank1(size_t end) const {
if (end == 0) return 0;
const uint32 end_word = (end - 1) >> BitmapIndex::kStorageLogBitSize;
const uint32 sum = get_index_ones_count(end_word);
const size_t masked = end & kStorageBlockMask;
if (masked == 0) {
return sum + __builtin_popcountll(bits_[end_word]);
} else {
const uint64 zero = 0;
return sum + __builtin_popcountll(bits_[end_word] &
(~zero >> (kStorageBitSize - masked)));
}
}
size_t BitmapIndex::Select1(size_t bit_index) const {
if (bit_index >= GetOnesCount()) return Bits();
// search primary index for the relevant block
uint32 rembits = bit_index + 1;
const uint32 block = find_primary_block(bit_index + 1);
uint32 offset = 0;
if (block > 0) {
rembits -= primary_index_[block - 1];
offset += block * kSecondaryBlockSize;
}
// search the secondary index
uint32 word = find_secondary_block(offset, rembits);
if (word > 0) {
rembits -= secondary_index_[offset + word - 1];
offset += word;
}
int nth = nth_bit(bits_[offset], rembits);
return (offset << BitmapIndex::kStorageLogBitSize) + nth;
}
size_t BitmapIndex::Select0(size_t bit_index) const {
if (bit_index >= Bits() - GetOnesCount()) return Bits();
// search inverted primary index for relevant block
uint32 remzeros = bit_index + 1;
uint32 offset = 0;
const uint32 block = find_inverted_primary_block(bit_index + 1);
if (block > 0) {
remzeros -= kPrimaryBlockBits * block - primary_index_[block - 1];
offset += block * kSecondaryBlockSize;
}
// search the inverted secondary index
uint32 word = find_inverted_secondary_block(offset, remzeros);
if (word > 0) {
remzeros -= BitmapIndex::kStorageBitSize * word -
secondary_index_[offset + word - 1];
offset += word;
}
int nth = nth_bit(~bits_[offset], remzeros);
return (offset << BitmapIndex::kStorageLogBitSize) + nth;
}
std::pair<size_t, size_t> BitmapIndex::Select0s(size_t bit_index) const {
const uint64 zero = 0;
const uint64 ones = ~zero;
size_t zeros_count = Bits() - GetOnesCount();
if (bit_index >= zeros_count) return std::make_pair(Bits(), Bits());
if (bit_index + 1 >= zeros_count) {
return std::make_pair(Select0(bit_index), Bits());
}
// search inverted primary index for relevant block
uint32 remzeros = bit_index + 1;
uint32 offset = 0;
const uint32 block = find_inverted_primary_block(bit_index + 1);
size_t num_zeros_in_block =
kPrimaryBlockBits * (1 + block) - primary_index_[block];
if (block > 0) {
size_t num_zeros_next =
kPrimaryBlockBits * block - primary_index_[block - 1];
num_zeros_in_block -= num_zeros_next;
remzeros -= num_zeros_next;
offset += block * kSecondaryBlockSize;
}
// search the inverted secondary index
uint32 word = find_inverted_secondary_block(offset, remzeros);
uint32 sum_zeros_next_word = BitmapIndex::kStorageBitSize * (1 + word) -
secondary_index_[offset + word];
uint32 sum_zeros_this_word = 0;
if (word > 0) {
sum_zeros_this_word = BitmapIndex::kStorageBitSize * word -
secondary_index_[offset + word - 1];
remzeros -= sum_zeros_this_word;
offset += word;
}
int nth = nth_bit(~bits_[offset], remzeros);
size_t current_zero = (offset << BitmapIndex::kStorageLogBitSize) + nth;
size_t next_zero;
// Does the current block contain the next zero?
if (num_zeros_in_block > remzeros + 1) {
if (sum_zeros_next_word - sum_zeros_this_word >= remzeros + 1) {
// the next zero is in this word
next_zero = (offset << BitmapIndex::kStorageLogBitSize) +
nth_bit(~bits_[offset], remzeros + 1);
} else {
// Find the first field that is not all ones by linear scan.
// In the worst case, this may scan 8Kbytes. The alternative is
// to inspect secondary_index_ looking for a place to jump to, but
// that would probably use more cache.
while (bits_[++offset] == ones) {
}
next_zero = (offset << BitmapIndex::kStorageLogBitSize) +
__builtin_ctzll(~bits_[offset]);
}
} else {
// the next zero is in a different block, a full search is required.
next_zero = Select0(bit_index + 1);
}
return std::make_pair(current_zero, next_zero);
}
size_t BitmapIndex::get_index_ones_count(size_t array_index) const {
uint32 sum = 0;
if (array_index > 0) {
sum += secondary_index_[array_index - 1];
uint32 end_block = (array_index - 1) / kSecondaryBlockSize;
if (end_block > 0) sum += primary_index_[end_block - 1];
}
return sum;
}
void BitmapIndex::BuildIndex(const uint64 *bits, size_t size) {
bits_ = bits;
size_ = size;
primary_index_.resize(primary_index_size());
secondary_index_.resize(ArraySize());
const uint64 zero = 0;
const uint64 ones = ~zero;
uint32 popcount = 0;
for (uint32 block = 0; block * kSecondaryBlockSize < ArraySize(); block++) {
uint32 block_popcount = 0;
uint32 block_begin = block * kSecondaryBlockSize;
uint32 block_end = block_begin + kSecondaryBlockSize;
if (block_end > ArraySize()) block_end = ArraySize();
for (uint32 j = block_begin; j < block_end; ++j) {
uint64 mask = ones;
if (j == ArraySize() - 1) {
mask = ones >> (-size_ & BitmapIndex::kStorageBlockMask);
}
block_popcount += __builtin_popcountll(bits_[j] & mask);
secondary_index_[j] = block_popcount;
}
popcount += block_popcount;
primary_index_[block] = popcount;
}
}
size_t BitmapIndex::find_secondary_block(size_t block_begin,
size_t rem_bit_index) const {
size_t block_end = block_begin + kSecondaryBlockSize;
if (block_end > ArraySize()) block_end = ArraySize();
return std::distance(
secondary_index_.begin() + block_begin,
std::lower_bound(secondary_index_.begin() + block_begin,
secondary_index_.begin() + block_end, rem_bit_index));
}
size_t BitmapIndex::find_inverted_secondary_block(size_t block_begin,
size_t rem_bit_index) const {
size_t block_end = block_begin + kSecondaryBlockSize;
if (block_end > ArraySize()) block_end = ArraySize();
return InvertedSearch<BitmapIndex::kStorageBitSize>(secondary_index_,
block_begin, block_end,
rem_bit_index)
- block_begin;
}
inline size_t BitmapIndex::find_primary_block(size_t bit_index) const {
return std::distance(
primary_index_.begin(),
std::lower_bound(primary_index_.begin(),
primary_index_.begin() + primary_index_size(),
bit_index));
}
size_t BitmapIndex::find_inverted_primary_block(size_t bit_index) const {
return InvertedSearch<kPrimaryBlockBits>(
primary_index_, 0, primary_index_.size(), bit_index);
}
} // end namespace fst
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-python_36_16k-linux-amd64-prod_pbmodel-opt.yml | build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python-tests-prod.sh 3.6.10:m 16k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU Python v3.6 prod tests (16kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on Python v3.6 on prod model, CPU only, optimized version (16kHz)"
| 0 |
coqui_public_repos/STT/native_client/kenlm | coqui_public_repos/STT/native_client/kenlm/lm/config.hh | #ifndef LM_CONFIG_H
#define LM_CONFIG_H
#include "lm_exception.hh"
#include "../util/mmap.hh"
#include <iosfwd>
#include <string>
#include <vector>
/* Configuration for ngram model. Separate header to reduce pollution. */
#if defined _MSC_VER
#define KENLM_EXPORT __declspec(dllexport)
#else
#define KENLM_EXPORT __attribute__ ((visibility("default")))
#endif /* _MSC_VER */
namespace lm {
class EnumerateVocab;
namespace ngram {
struct KENLM_EXPORT Config {
// EFFECTIVE FOR BOTH ARPA AND BINARY READS
// (default true) print progress bar to messages
bool show_progress;
// Where to log messages including the progress bar. Set to NULL for
// silence.
std::ostream *messages;
std::ostream *ProgressMessages() const {
return show_progress ? messages : 0;
}
// This will be called with every string in the vocabulary by the
// constructor; it need only exist for the lifetime of the constructor.
// See enumerate_vocab.hh for more detail. Config does not take ownership;
// just delete/let it go out of scope after the constructor exits.
EnumerateVocab *enumerate_vocab;
// ONLY EFFECTIVE WHEN READING ARPA
// What to do when <unk> isn't in the provided model.
WarningAction unknown_missing;
// What to do when <s> or </s> is missing from the model.
// If THROW_UP, the exception will be of type util::SpecialWordMissingException.
WarningAction sentence_marker_missing;
// What to do with a positive log probability. For COMPLAIN and SILENT, map
// to 0.
WarningAction positive_log_probability;
// The probability to substitute for <unk> if it's missing from the model.
// No effect if the model has <unk> or unknown_missing == THROW_UP.
float unknown_missing_logprob;
// Size multiplier for probing hash table. Must be > 1. Space is linear in
// this. Time is probing_multiplier / (probing_multiplier - 1). No effect
// for sorted variant.
// If you find yourself setting this to a low number, consider using the
// TrieModel which has lower memory consumption.
float probing_multiplier;
// Amount of memory to use for building. The actual memory usage will be
// higher since this just sets sort buffer size. Only applies to trie
// models.
std::size_t building_memory;
// Template for temporary directory appropriate for passing to mkdtemp.
// The characters XXXXXX are appended before passing to mkdtemp. Only
// applies to trie. If empty, defaults to write_mmap. If that's NULL,
// defaults to input file name.
std::string temporary_directory_prefix;
// Level of complaining to do when loading from ARPA instead of binary format.
enum ARPALoadComplain {ALL, EXPENSIVE, NONE};
ARPALoadComplain arpa_complain;
// While loading an ARPA file, also write out this binary format file. Set
// to NULL to disable.
const char *write_mmap;
enum WriteMethod {
WRITE_MMAP, // Map the file directly.
WRITE_AFTER // Write after we're done.
};
WriteMethod write_method;
// Include the vocab in the binary file? Only effective if write_mmap != NULL.
bool include_vocab;
// Left rest options. Only used when the model includes rest costs.
enum RestFunction {
REST_MAX, // Maximum of any score to the left
REST_LOWER, // Use lower-order files given below.
};
RestFunction rest_function;
// Only used for REST_LOWER.
std::vector<std::string> rest_lower_files;
// Quantization options. Only effective for QuantTrieModel. One value is
// reserved for each of prob and backoff, so 2^bits - 1 buckets will be used
// to quantize (and one of the remaining backoffs will be 0).
uint8_t prob_bits, backoff_bits;
// Bhiksha compression (simple form). Only works with trie.
uint8_t pointer_bhiksha_bits;
// ONLY EFFECTIVE WHEN READING BINARY
// How to get the giant array into memory: lazy mmap, populate, read etc.
  // See util/mmap.hh for details of LoadMethod.
util::LoadMethod load_method;
// Set defaults.
Config();
};
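// Example (a sketch): quiet loading of a binary model with lazy mmap.
//
//   lm::ngram::Config config;
//   config.show_progress = false;
//   config.load_method = util::LAZY;
//   // Then pass config to a model constructor (see model.hh), e.g.:
//   // lm::ngram::Model model("file.binary", config);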
} /* namespace ngram */ } /* namespace lm */
#endif // LM_CONFIG_H
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/script/convert.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#ifndef FST_SCRIPT_CONVERT_H_
#define FST_SCRIPT_CONVERT_H_
#include <memory>
#include <string>
#include <utility>
#include <fst/register.h>
#include <fst/script/arg-packs.h>
#include <fst/script/fst-class.h>
namespace fst {
namespace script {
using ConvertInnerArgs = std::pair<const FstClass &, const string &>;
using ConvertArgs = WithReturnValue<FstClass *, ConvertInnerArgs>;
template <class Arc>
void Convert(ConvertArgs *args) {
const Fst<Arc> &fst = *(std::get<0>(args->args).GetFst<Arc>());
const string &new_type = std::get<1>(args->args);
std::unique_ptr<Fst<Arc>> result(Convert(fst, new_type));
args->retval = result ? new FstClass(*result) : nullptr;
}
FstClass *Convert(const FstClass &fst, const string &new_type);
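// Example (a sketch): convert an FST to the "const" representation.
//
//   std::unique_ptr<FstClass> ifst(FstClass::Read("in.fst"));
//   std::unique_ptr<FstClass> ofst(Convert(*ifst, "const"));
//   if (ofst) ofst->Write("out.fst");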
} // namespace script
} // namespace fst
#endif // FST_SCRIPT_CONVERT_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/factor-weight.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes to factor weights in an FST.
#ifndef FST_FACTOR_WEIGHT_H_
#define FST_FACTOR_WEIGHT_H_
#include <algorithm>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/cache.h>
#include <fst/test-properties.h>
namespace fst {
constexpr uint32 kFactorFinalWeights = 0x00000001;
constexpr uint32 kFactorArcWeights = 0x00000002;
template <class Arc>
struct FactorWeightOptions : CacheOptions {
using Label = typename Arc::Label;
float delta;
uint32 mode; // Factor arc weights and/or final weights.
Label final_ilabel; // Input label of arc when factoring final weights.
Label final_olabel; // Output label of arc when factoring final weights.
  bool increment_final_ilabel;  // When factoring final weights results in > 1
  bool increment_final_olabel;  // arcs at a state, increment labels to make
                                // them distinct?
explicit FactorWeightOptions(const CacheOptions &opts, float delta = kDelta,
uint32 mode = kFactorArcWeights |
kFactorFinalWeights,
Label final_ilabel = 0, Label final_olabel = 0,
bool increment_final_ilabel = false,
bool increment_final_olabel = false)
: CacheOptions(opts),
delta(delta),
mode(mode),
final_ilabel(final_ilabel),
final_olabel(final_olabel),
increment_final_ilabel(increment_final_ilabel),
increment_final_olabel(increment_final_olabel) {}
explicit FactorWeightOptions(float delta = kDelta,
uint32 mode = kFactorArcWeights |
kFactorFinalWeights,
Label final_ilabel = 0, Label final_olabel = 0,
bool increment_final_ilabel = false,
bool increment_final_olabel = false)
: delta(delta),
mode(mode),
final_ilabel(final_ilabel),
final_olabel(final_olabel),
increment_final_ilabel(increment_final_ilabel),
increment_final_olabel(increment_final_olabel) {}
};
// A factor iterator takes as argument a weight w and returns a sequence of
// pairs of weights (xi, yi) such that the sum of the products xi times yi is
// equal to w. If w is fully factored, the iterator should return nothing.
//
// template <class W>
// class FactorIterator {
// public:
// explicit FactorIterator(W w);
//
// bool Done() const;
//
// void Next();
//
// std::pair<W, W> Value() const;
//
// void Reset();
// }
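//
// For example, StringFactor below factors a string weight of length > 1 into
// the pair (first label, remainder), e.g., "abc" -> ("a", "bc"), and is
// immediately Done() for weights of length <= 1.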
// Factors trivially.
template <class W>
class IdentityFactor {
public:
explicit IdentityFactor(const W &weight) {}
bool Done() const { return true; }
void Next() {}
std::pair<W, W> Value() const { return std::make_pair(W::One(), W::One()); }
void Reset() {}
};
// Factors a StringWeight w as 'ab' where 'a' is a label.
template <typename Label, StringType S = STRING_LEFT>
class StringFactor {
public:
explicit StringFactor(const StringWeight<Label, S> &weight)
: weight_(weight), done_(weight.Size() <= 1) {}
bool Done() const { return done_; }
void Next() { done_ = true; }
std::pair<StringWeight<Label, S>, StringWeight<Label, S>> Value() const {
using Weight = StringWeight<Label, S>;
typename Weight::Iterator siter(weight_);
Weight w1(siter.Value());
Weight w2;
for (siter.Next(); !siter.Done(); siter.Next()) w2.PushBack(siter.Value());
return std::make_pair(w1, w2);
}
void Reset() { done_ = weight_.Size() <= 1; }
private:
const StringWeight<Label, S> weight_;
bool done_;
};
// Factor a GallicWeight using StringFactor.
template <class Label, class W, GallicType G = GALLIC_LEFT>
class GallicFactor {
public:
using GW = GallicWeight<Label, W, G>;
explicit GallicFactor(const GW &weight)
: weight_(weight), done_(weight.Value1().Size() <= 1) {}
bool Done() const { return done_; }
void Next() { done_ = true; }
std::pair<GW, GW> Value() const {
StringFactor<Label, GallicStringType(G)> siter(weight_.Value1());
GW w1(siter.Value().first, weight_.Value2());
GW w2(siter.Value().second, W::One());
return std::make_pair(w1, w2);
}
void Reset() { done_ = weight_.Value1().Size() <= 1; }
private:
const GW weight_;
bool done_;
};
// Specialization for the (general) GALLIC type GallicWeight.
template <class Label, class W>
class GallicFactor<Label, W, GALLIC> {
public:
using GW = GallicWeight<Label, W, GALLIC>;
using GRW = GallicWeight<Label, W, GALLIC_RESTRICT>;
explicit GallicFactor(const GW &weight)
: iter_(weight),
done_(weight.Size() == 0 ||
(weight.Size() == 1 && weight.Back().Value1().Size() <= 1)) {}
bool Done() const { return done_ || iter_.Done(); }
void Next() { iter_.Next(); }
void Reset() { iter_.Reset(); }
std::pair<GW, GW> Value() const {
const auto weight = iter_.Value();
StringFactor<Label, GallicStringType(GALLIC_RESTRICT)> siter(
weight.Value1());
GRW w1(siter.Value().first, weight.Value2());
GRW w2(siter.Value().second, W::One());
return std::make_pair(GW(w1), GW(w2));
}
private:
UnionWeightIterator<GRW, GallicUnionWeightOptions<Label, W>> iter_;
bool done_;
};
namespace internal {
// Implementation class for FactorWeight
template <class Arc, class FactorIterator>
class FactorWeightFstImpl : public CacheImpl<Arc> {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using FstImpl<Arc>::SetType;
using FstImpl<Arc>::SetProperties;
using FstImpl<Arc>::SetInputSymbols;
using FstImpl<Arc>::SetOutputSymbols;
using CacheBaseImpl<CacheState<Arc>>::PushArc;
using CacheBaseImpl<CacheState<Arc>>::HasStart;
using CacheBaseImpl<CacheState<Arc>>::HasFinal;
using CacheBaseImpl<CacheState<Arc>>::HasArcs;
using CacheBaseImpl<CacheState<Arc>>::SetArcs;
using CacheBaseImpl<CacheState<Arc>>::SetFinal;
using CacheBaseImpl<CacheState<Arc>>::SetStart;
struct Element {
Element() {}
Element(StateId s, Weight weight_) : state(s), weight(std::move(weight_)) {}
StateId state; // Input state ID.
Weight weight; // Residual weight.
};
FactorWeightFstImpl(const Fst<Arc> &fst, const FactorWeightOptions<Arc> &opts)
: CacheImpl<Arc>(opts),
fst_(fst.Copy()),
delta_(opts.delta),
mode_(opts.mode),
final_ilabel_(opts.final_ilabel),
final_olabel_(opts.final_olabel),
increment_final_ilabel_(opts.increment_final_ilabel),
increment_final_olabel_(opts.increment_final_olabel) {
SetType("factor_weight");
const auto props = fst.Properties(kFstProperties, false);
SetProperties(FactorWeightProperties(props), kCopyProperties);
SetInputSymbols(fst.InputSymbols());
SetOutputSymbols(fst.OutputSymbols());
if (mode_ == 0) {
LOG(WARNING) << "FactorWeightFst: Factor mode is set to 0; "
<< "factoring neither arc weights nor final weights";
}
}
FactorWeightFstImpl(const FactorWeightFstImpl<Arc, FactorIterator> &impl)
: CacheImpl<Arc>(impl),
fst_(impl.fst_->Copy(true)),
delta_(impl.delta_),
mode_(impl.mode_),
final_ilabel_(impl.final_ilabel_),
final_olabel_(impl.final_olabel_),
increment_final_ilabel_(impl.increment_final_ilabel_),
increment_final_olabel_(impl.increment_final_olabel_) {
SetType("factor_weight");
SetProperties(impl.Properties(), kCopyProperties);
SetInputSymbols(impl.InputSymbols());
SetOutputSymbols(impl.OutputSymbols());
}
StateId Start() {
if (!HasStart()) {
const auto s = fst_->Start();
if (s == kNoStateId) return kNoStateId;
SetStart(FindState(Element(fst_->Start(), Weight::One())));
}
return CacheImpl<Arc>::Start();
}
Weight Final(StateId s) {
if (!HasFinal(s)) {
const auto &element = elements_[s];
// TODO(sorenj): fix so cast is unnecessary
const auto weight =
element.state == kNoStateId
? element.weight
: (Weight)Times(element.weight, fst_->Final(element.state));
FactorIterator siter(weight);
if (!(mode_ & kFactorFinalWeights) || siter.Done()) {
SetFinal(s, weight);
} else {
SetFinal(s, Weight::Zero());
}
}
return CacheImpl<Arc>::Final(s);
}
size_t NumArcs(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl<Arc>::NumArcs(s);
}
size_t NumInputEpsilons(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl<Arc>::NumInputEpsilons(s);
}
size_t NumOutputEpsilons(StateId s) {
if (!HasArcs(s)) Expand(s);
return CacheImpl<Arc>::NumOutputEpsilons(s);
}
uint64 Properties() const override { return Properties(kFstProperties); }
// Sets error if found, and returns other FST impl properties.
uint64 Properties(uint64 mask) const override {
if ((mask & kError) && fst_->Properties(kError, false)) {
SetProperties(kError, kError);
}
return FstImpl<Arc>::Properties(mask);
}
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) {
if (!HasArcs(s)) Expand(s);
CacheImpl<Arc>::InitArcIterator(s, data);
}
// Finds the state corresponding to an element, creating a new state if the
// element is not found.
StateId FindState(const Element &element) {
if (!(mode_ & kFactorArcWeights) && element.weight == Weight::One() &&
element.state != kNoStateId) {
while (unfactored_.size() <= element.state)
unfactored_.push_back(kNoStateId);
if (unfactored_[element.state] == kNoStateId) {
unfactored_[element.state] = elements_.size();
elements_.push_back(element);
}
return unfactored_[element.state];
} else {
const auto insert_result =
element_map_.insert(std::make_pair(element, elements_.size()));
if (insert_result.second) {
elements_.push_back(element);
}
return insert_result.first->second;
}
}
// Computes the outgoing transitions from a state, creating new destination
// states as needed.
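// When an arc weight factors as a pair (p, r), the pushed arc carries p and
// its destination Element stores the residual r, which is multiplied back in
// when that destination state is itself expanded.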
void Expand(StateId s) {
const auto element = elements_[s];
if (element.state != kNoStateId) {
for (ArcIterator<Fst<Arc>> ait(*fst_, element.state); !ait.Done();
ait.Next()) {
const auto &arc = ait.Value();
const auto weight = Times(element.weight, arc.weight);
FactorIterator fiter(weight);
if (!(mode_ & kFactorArcWeights) || fiter.Done()) {
const auto dest = FindState(Element(arc.nextstate, Weight::One()));
PushArc(s, Arc(arc.ilabel, arc.olabel, weight, dest));
} else {
for (; !fiter.Done(); fiter.Next()) {
const auto &pair = fiter.Value();
const auto dest =
FindState(Element(arc.nextstate, pair.second.Quantize(delta_)));
PushArc(s, Arc(arc.ilabel, arc.olabel, pair.first, dest));
}
}
}
}
if ((mode_ & kFactorFinalWeights) &&
((element.state == kNoStateId) ||
(fst_->Final(element.state) != Weight::Zero()))) {
const auto weight =
element.state == kNoStateId
? element.weight
: Times(element.weight, fst_->Final(element.state));
auto ilabel = final_ilabel_;
auto olabel = final_olabel_;
for (FactorIterator fiter(weight); !fiter.Done(); fiter.Next()) {
const auto &pair = fiter.Value();
const auto dest =
FindState(Element(kNoStateId, pair.second.Quantize(delta_)));
PushArc(s, Arc(ilabel, olabel, pair.first, dest));
if (increment_final_ilabel_) ++ilabel;
if (increment_final_olabel_) ++olabel;
}
}
SetArcs(s);
}
private:
// Equality function for Elements; assumes weights have been quantized.
class ElementEqual {
public:
bool operator()(const Element &x, const Element &y) const {
return x.state == y.state && x.weight == y.weight;
}
};
// Hash function for Elements to Fst states.
class ElementKey {
public:
size_t operator()(const Element &x) const {
static constexpr auto prime = 7853;
return static_cast<size_t>(x.state * prime + x.weight.Hash());
}
};
using ElementMap =
std::unordered_map<Element, StateId, ElementKey, ElementEqual>;
std::unique_ptr<const Fst<Arc>> fst_;
float delta_;
uint32 mode_; // Factoring arc and/or final weights.
Label final_ilabel_; // ilabel of arc created when factoring final weights.
Label final_olabel_; // olabel of arc created when factoring final weights.
bool increment_final_ilabel_; // When factoring final weights results in
bool increment_final_olabel_; // multiple arcs, increment labels?
std::vector<Element> elements_; // mapping from FST state to Element.
ElementMap element_map_; // mapping from Element to FST state.
// Mapping between old/new StateId for states that do not need to be factored
// when mode_ is 0 or kFactorFinalWeights.
std::vector<StateId> unfactored_;
};
} // namespace internal
// FactorWeightFst takes as template parameter a FactorIterator as defined
// above. The result of weight factoring is a transducer equivalent to the
// input whose path weights have been factored according to the FactorIterator.
// States and transitions will be added as necessary. The algorithm is a
// generalization to arbitrary weights of the second step of the input
// epsilon-normalization algorithm.
//
// This class attaches interface to implementation and handles reference
// counting, delegating most methods to ImplToFst.
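//
// A minimal usage sketch (MyFactorIterator stands for any class implementing
// the FactorIterator interface consumed below: construction from a Weight,
// Done(), Next(), and Value() returning a pair of weights):
//
//   StdVectorFst ifst = ...;  // Input FST.
//   FactorWeightFst<StdArc, MyFactorIterator> ofst(ifst);  // Default options.
//   // States and arcs of ofst are computed lazily as it is visited.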
template <class A, class FactorIterator>
class FactorWeightFst
: public ImplToFst<internal::FactorWeightFstImpl<A, FactorIterator>> {
public:
using Arc = A;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using Store = DefaultCacheStore<Arc>;
using State = typename Store::State;
using Impl = internal::FactorWeightFstImpl<Arc, FactorIterator>;
friend class ArcIterator<FactorWeightFst<Arc, FactorIterator>>;
friend class StateIterator<FactorWeightFst<Arc, FactorIterator>>;
explicit FactorWeightFst(const Fst<Arc> &fst)
: ImplToFst<Impl>(
std::make_shared<Impl>(fst, FactorWeightOptions<Arc>())) {}
FactorWeightFst(const Fst<Arc> &fst, const FactorWeightOptions<Arc> &opts)
: ImplToFst<Impl>(std::make_shared<Impl>(fst, opts)) {}
// See Fst<>::Copy() for doc.
FactorWeightFst(const FactorWeightFst<Arc, FactorIterator> &fst, bool copy)
: ImplToFst<Impl>(fst, copy) {}
// Get a copy of this FactorWeightFst. See Fst<>::Copy() for further doc.
FactorWeightFst<Arc, FactorIterator> *Copy(bool copy = false) const override {
return new FactorWeightFst<Arc, FactorIterator>(*this, copy);
}
inline void InitStateIterator(StateIteratorData<Arc> *data) const override;
void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override {
GetMutableImpl()->InitArcIterator(s, data);
}
private:
using ImplToFst<Impl>::GetImpl;
using ImplToFst<Impl>::GetMutableImpl;
FactorWeightFst &operator=(const FactorWeightFst &) = delete;
};
// Specialization for FactorWeightFst.
template <class Arc, class FactorIterator>
class StateIterator<FactorWeightFst<Arc, FactorIterator>>
: public CacheStateIterator<FactorWeightFst<Arc, FactorIterator>> {
public:
explicit StateIterator(const FactorWeightFst<Arc, FactorIterator> &fst)
: CacheStateIterator<FactorWeightFst<Arc, FactorIterator>>(
fst, fst.GetMutableImpl()) {}
};
// Specialization for FactorWeightFst.
template <class Arc, class FactorIterator>
class ArcIterator<FactorWeightFst<Arc, FactorIterator>>
: public CacheArcIterator<FactorWeightFst<Arc, FactorIterator>> {
public:
using StateId = typename Arc::StateId;
ArcIterator(const FactorWeightFst<Arc, FactorIterator> &fst, StateId s)
: CacheArcIterator<FactorWeightFst<Arc, FactorIterator>>(
fst.GetMutableImpl(), s) {
if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s);
}
};
template <class Arc, class FactorIterator>
inline void FactorWeightFst<Arc, FactorIterator>::InitStateIterator(
StateIteratorData<Arc> *data) const {
data->base = new StateIterator<FactorWeightFst<Arc, FactorIterator>>(*this);
}
} // namespace fst
#endif // FST_FACTOR_WEIGHT_H_
| 0 |
coqui_public_repos/TTS | coqui_public_repos/TTS/TTS/VERSION | 0.22.0
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/notebooks/README.md | # Python Notebooks for 🐸 STT
| Notebook title | Language(s) | Link to Colab |
|----------------|---------------|-------------|
|Train your first 🐸 STT model | English | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/coqui-ai/STT/blob/main/notebooks/train_your_first_coqui_STT_model.ipynb) |
|Easy Transfer learning | English --> Russian | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/coqui-ai/STT/blob/main/notebooks/easy_transfer_learning.ipynb)|
| Train a model with Common Voice | Serbian | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/coqui-ai/STT/blob/main/notebooks/train_with_common_voice.ipynb) |
| Create a custom Speech-to-Text model for your voice | English | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/coqui-ai/STT/blob/main/notebooks/train_personal_model_with_common_voice.ipynb) |
| 0 |
coqui_public_repos/TTS/TTS/tts | coqui_public_repos/TTS/TTS/tts/models/overflow.py | import os
from typing import Dict, List, Union
import torch
from coqpit import Coqpit
from torch import nn
from trainer.logging.tensorboard_logger import TensorboardLogger
from TTS.tts.layers.overflow.common_layers import Encoder, OverflowUtils
from TTS.tts.layers.overflow.decoder import Decoder
from TTS.tts.layers.overflow.neural_hmm import NeuralHMM
from TTS.tts.layers.overflow.plotting_utils import (
get_spec_from_most_probable_state,
plot_transition_probabilities_to_numpy,
)
from TTS.tts.models.base_tts import BaseTTS
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
from TTS.utils.generic_utils import format_aux_input
from TTS.utils.io import load_fsspec
class Overflow(BaseTTS):
"""OverFlow TTS model.
Paper::
https://arxiv.org/abs/2211.06892
Paper abstract::
Neural HMMs are a type of neural transducer recently proposed for
sequence-to-sequence modelling in text-to-speech. They combine the best features
of classic statistical speech synthesis and modern neural TTS, requiring less
data and fewer training updates, and are less prone to gibberish output caused
by neural attention failures. In this paper, we combine neural HMM TTS with
normalising flows for describing the highly non-Gaussian distribution of speech
acoustics. The result is a powerful, fully probabilistic model of durations and
acoustics that can be trained using exact maximum likelihood. Compared to
dominant flow-based acoustic models, our approach integrates autoregression for
improved modelling of long-range dependences such as utterance-level prosody.
Experiments show that a system based on our proposal gives more accurate
pronunciations and better subjective speech quality than comparable methods,
whilst retaining the original advantages of neural HMMs. Audio examples and code
are available at https://shivammehta25.github.io/OverFlow/.
Note:
- Neural HMMs uses flat start initialization i.e it computes the means and std and transition probabilities
of the dataset and uses them to initialize the model. This benefits the model and helps with faster learning
If you change the dataset or want to regenerate the parameters change the `force_generate_statistics` and
`mel_statistics_parameter_path` accordingly.
- To enable multi-GPU training, set the `use_grad_checkpointing=False` in config.
This will significantly increase the memory usage. This is because to compute
the actual data likelihood (not an approximation using MAS/Viterbi) we must use
all the states at the previous time step during the forward pass to decide the
probability distribution at the current step i.e the difference between the forward
algorithm and viterbi approximation.
Check :class:`TTS.tts.configs.overflow.OverFlowConfig` for class arguments.
"""
def __init__(
self,
config: "OverFlowConfig",
ap: "AudioProcessor" = None,
tokenizer: "TTSTokenizer" = None,
speaker_manager: SpeakerManager = None,
):
super().__init__(config, ap, tokenizer, speaker_manager)
# pass all config fields to `self`
# for fewer code changes
self.config = config
for key in config:
setattr(self, key, config[key])
self.decoder_output_dim = config.out_channels
self.encoder = Encoder(config.num_chars, config.state_per_phone, config.encoder_in_out_features)
self.neural_hmm = NeuralHMM(
frame_channels=self.out_channels,
ar_order=self.ar_order,
deterministic_transition=self.deterministic_transition,
encoder_dim=self.encoder_in_out_features,
prenet_type=self.prenet_type,
prenet_dim=self.prenet_dim,
prenet_n_layers=self.prenet_n_layers,
prenet_dropout=self.prenet_dropout,
prenet_dropout_at_inference=self.prenet_dropout_at_inference,
memory_rnn_dim=self.memory_rnn_dim,
outputnet_size=self.outputnet_size,
flat_start_params=self.flat_start_params,
std_floor=self.std_floor,
use_grad_checkpointing=self.use_grad_checkpointing,
)
self.decoder = Decoder(
self.out_channels,
self.hidden_channels_dec,
self.kernel_size_dec,
self.dilation_rate,
self.num_flow_blocks_dec,
self.num_block_layers,
dropout_p=self.dropout_p_dec,
num_splits=self.num_splits,
num_squeeze=self.num_squeeze,
sigmoid_scale=self.sigmoid_scale,
c_in_channels=self.c_in_channels,
)
self.register_buffer("mean", torch.tensor(0))
self.register_buffer("std", torch.tensor(1))
def update_mean_std(self, statistics_dict: Dict):
self.mean.data = torch.tensor(statistics_dict["mean"])
self.std.data = torch.tensor(statistics_dict["std"])
def preprocess_batch(self, text, text_len, mels, mel_len):
if self.mean.item() == 0 or self.std.item() == 1:
statistics_dict = torch.load(self.mel_statistics_parameter_path)
self.update_mean_std(statistics_dict)
mels = self.normalize(mels)
return text, text_len, mels, mel_len
def normalize(self, x):
return x.sub(self.mean).div(self.std)
def inverse_normalize(self, x):
return x.mul(self.std).add(self.mean)
def forward(self, text, text_len, mels, mel_len):
"""
Forward pass for training and computing the log likelihood of a given batch.
Shapes:
text: :math:`[B, T_in]`
text_len: :math:`[B]`
mels: :math:`[B, T_out, C]`
mel_len: :math:`[B]`
"""
text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len)
encoder_outputs, encoder_output_len = self.encoder(text, text_len)
z, z_lengths, logdet = self.decoder(mels.transpose(1, 2), mel_len)
log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm(
encoder_outputs, encoder_output_len, z, z_lengths
)
outputs = {
"log_probs": log_probs + logdet,
"alignments": fwd_alignments,
"transition_vectors": transition_vectors,
"means": means,
}
return outputs
@staticmethod
def _training_stats(batch):
stats = {}
stats["avg_text_length"] = batch["text_lengths"].float().mean()
stats["avg_spec_length"] = batch["mel_lengths"].float().mean()
stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean()
stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean()
return stats
def train_step(self, batch: dict, criterion: nn.Module):
text_input = batch["text_input"]
text_lengths = batch["text_lengths"]
mel_input = batch["mel_input"]
mel_lengths = batch["mel_lengths"]
outputs = self.forward(
text=text_input,
text_len=text_lengths,
mels=mel_input,
mel_len=mel_lengths,
)
loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum()))
# for printing useful statistics on terminal
loss_dict.update(self._training_stats(batch))
return outputs, loss_dict
def eval_step(self, batch: Dict, criterion: nn.Module):
return self.train_step(batch, criterion)
def _format_aux_input(self, aux_input: Dict, default_input_dict):
"""Set missing fields to their default value.
Args:
aux_input (Dict): Dictionary containing the auxiliary inputs.
"""
default_input_dict = default_input_dict.copy()
default_input_dict.update(
{
"sampling_temp": self.sampling_temp,
"max_sampling_time": self.max_sampling_time,
"duration_threshold": self.duration_threshold,
}
)
if aux_input:
return format_aux_input(default_input_dict, aux_input)
return default_input_dict
@torch.no_grad()
def inference(
self,
text: torch.Tensor,
aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None},
): # pylint: disable=dangerous-default-value
"""Sampling from the model
Args:
text (torch.Tensor): :math:`[B, T_in]`
aux_input (Dict, optional): Sampling parameters (`sampling_temp`, `max_sampling_time`, `duration_threshold`) and input lengths; missing fields default to the model config values.
Returns:
outputs: Dictionary containing the following
- mel (torch.Tensor): :math:`[B, T_out, C]`
- hmm_outputs_len (torch.Tensor): :math:`[B]`
- state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch.
- input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM.
- output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM.
"""
default_input_dict = {
"x_lengths": torch.sum(text != 0, dim=1),
}
aux_input = self._format_aux_input(aux_input, default_input_dict)
encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"])
outputs = self.neural_hmm.inference(
encoder_outputs,
encoder_output_len,
sampling_temp=aux_input["sampling_temp"],
max_sampling_time=aux_input["max_sampling_time"],
duration_threshold=aux_input["duration_threshold"],
)
mels, mel_outputs_len, _ = self.decoder(
outputs["hmm_outputs"].transpose(1, 2), outputs["hmm_outputs_len"], reverse=True
)
mels = self.inverse_normalize(mels.transpose(1, 2))
outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len})
outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"])
return outputs
@staticmethod
def get_criterion():
return NLLLoss()
@staticmethod
def init_from_config(config: "OverFlowConfig", samples: Union[List[List], List[Dict]] = None, verbose=True):
"""Initiate model from config
Args:
config (VitsConfig): Model config.
samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training.
Defaults to None.
verbose (bool): If True, print init messages. Defaults to True.
"""
from TTS.utils.audio import AudioProcessor
ap = AudioProcessor.init_from_config(config, verbose)
tokenizer, new_config = TTSTokenizer.init_from_config(config)
speaker_manager = SpeakerManager.init_from_config(config, samples)
return Overflow(new_config, ap, tokenizer, speaker_manager)
def load_checkpoint(
self, config: Coqpit, checkpoint_path: str, eval: bool = False, strict: bool = True, cache=False
): # pylint: disable=unused-argument, redefined-builtin
state = load_fsspec(checkpoint_path, map_location=torch.device("cpu"))
self.load_state_dict(state["model"])
if eval:
self.eval()
self.decoder.store_inverse()
assert not self.training
def on_init_start(self, trainer):
"""If the current dataset does not have normalisation statistics and initialisation transition_probability it computes them otherwise loads."""
if not os.path.isfile(trainer.config.mel_statistics_parameter_path) or trainer.config.force_generate_statistics:
dataloader = trainer.get_train_dataloader(
training_assets=None, samples=trainer.train_samples, verbose=False
)
print(
f" | > Data parameters not found for: {trainer.config.mel_statistics_parameter_path}. Computing mel normalization parameters..."
)
data_mean, data_std, init_transition_prob = OverflowUtils.get_data_parameters_for_flat_start(
dataloader, trainer.config.out_channels, trainer.config.state_per_phone
)
print(
f" | > Saving data parameters to: {trainer.config.mel_statistics_parameter_path}: value: {data_mean, data_std, init_transition_prob}"
)
statistics = {
"mean": data_mean.item(),
"std": data_std.item(),
"init_transition_prob": init_transition_prob.item(),
}
torch.save(statistics, trainer.config.mel_statistics_parameter_path)
else:
print(
f" | > Data parameters found for: {trainer.config.mel_statistics_parameter_path}. Loading mel normalization parameters..."
)
statistics = torch.load(trainer.config.mel_statistics_parameter_path)
data_mean, data_std, init_transition_prob = (
statistics["mean"],
statistics["std"],
statistics["init_transition_prob"],
)
print(f" | > Data parameters loaded with value: {data_mean, data_std, init_transition_prob}")
trainer.config.flat_start_params["transition_p"] = (
init_transition_prob.item() if torch.is_tensor(init_transition_prob) else init_transition_prob
)
OverflowUtils.update_flat_start_transition(trainer.model, init_transition_prob)
trainer.model.update_mean_std(statistics)
@torch.inference_mode()
def _create_logs(self, batch, outputs, ap): # pylint: disable=no-self-use, unused-argument
alignments, transition_vectors = outputs["alignments"], outputs["transition_vectors"]
means = torch.stack(outputs["means"], dim=1)
figures = {
"alignment": plot_alignment(alignments[0].exp(), title="Forward alignment", fig_size=(20, 20)),
"log_alignment": plot_alignment(
alignments[0].exp(), title="Forward log alignment", plot_log=True, fig_size=(20, 20)
),
"transition_vectors": plot_alignment(transition_vectors[0], title="Transition vectors", fig_size=(20, 20)),
"mel_from_most_probable_state": plot_spectrogram(
get_spec_from_most_probable_state(alignments[0], means[0], self.decoder), fig_size=(12, 3)
),
"mel_target": plot_spectrogram(batch["mel_input"][0], fig_size=(12, 3)),
}
# sample one item from the batch; index -1 gives the smallest (shortest) item
print(" | > Synthesising audio from the model...")
inference_output = self.inference(
batch["text_input"][-1].unsqueeze(0), aux_input={"x_lengths": batch["text_lengths"][-1].unsqueeze(0)}
)
figures["synthesised"] = plot_spectrogram(inference_output["model_outputs"][0], fig_size=(12, 3))
states = [p[1] for p in inference_output["input_parameters"][0]]
transition_probability_synthesising = [p[2].cpu().numpy() for p in inference_output["output_parameters"][0]]
for i in range((len(transition_probability_synthesising) // 200) + 1):
start = i * 200
end = (i + 1) * 200
figures[f"synthesised_transition_probabilities/{i}"] = plot_transition_probabilities_to_numpy(
states[start:end], transition_probability_synthesising[start:end]
)
audio = ap.inv_melspectrogram(inference_output["model_outputs"][0].T.cpu().numpy())
return figures, {"audios": audio}
def train_log(
self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int
): # pylint: disable=unused-argument
"""Log training progress."""
figures, audios = self._create_logs(batch, outputs, self.ap)
logger.train_figures(steps, figures)
logger.train_audios(steps, audios, self.ap.sample_rate)
def eval_log(
self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int
): # pylint: disable=unused-argument
"""Compute and log evaluation metrics."""
# Plot model parameters histograms
if isinstance(logger, TensorboardLogger):
# I don't know if any other loggers support this
for tag, value in self.named_parameters():
tag = tag.replace(".", "/")
logger.writer.add_histogram(tag, value.data.cpu().numpy(), steps)
figures, audios = self._create_logs(batch, outputs, self.ap)
logger.eval_figures(steps, figures)
logger.eval_audios(steps, audios, self.ap.sample_rate)
def test_log(
self, outputs: dict, logger: "Logger", assets: dict, steps: int # pylint: disable=unused-argument
) -> None:
logger.test_audios(steps, outputs[1], self.ap.sample_rate)
logger.test_figures(steps, outputs[0])
class NLLLoss(nn.Module):
"""Negative log likelihood loss."""
def forward(self, log_prob: torch.Tensor) -> dict: # pylint: disable=no-self-use
"""Compute the loss.
Args:
log_prob (Tensor): [B, T, D]
Returns:
Tensor: [1]
"""
return_dict = {}
return_dict["loss"] = -log_prob.mean()
return return_dict
| 0 |
coqui_public_repos/inference-engine/third_party/cereal/include/cereal | coqui_public_repos/inference-engine/third_party/cereal/include/cereal/types/forward_list.hpp | /*! \file forward_list.hpp
\brief Support for types found in \<forward_list\>
\ingroup STLSupport */
/*
Copyright (c) 2014, Randolph Voorhies, Shane Grant
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of cereal nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CEREAL_TYPES_FORWARD_LIST_HPP_
#define CEREAL_TYPES_FORWARD_LIST_HPP_
#include "cereal/cereal.hpp"
#include <forward_list>
namespace cereal
{
//! Saving for std::forward_list all other types
template <class Archive, class T, class A> inline
void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::forward_list<T, A> const & forward_list )
{
// write the size - note that this is slow because we need to traverse
// the entire list. there are ways we could avoid this but this was chosen
// since it works in the most general fashion with any archive type
size_type const size = std::distance( forward_list.begin(), forward_list.end() );
ar( make_size_tag( size ) );
// write the list
for( const auto & i : forward_list )
ar( i );
}
//! Loading for std::forward_list all other types
template <class Archive, class T, class A>
void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::forward_list<T, A> & forward_list )
{
size_type size;
ar( make_size_tag( size ) );
forward_list.resize( static_cast<size_t>( size ) );
for( auto & i : forward_list )
ar( i );
}
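// A minimal usage sketch (JSONOutputArchive comes from
// "cereal/archives/json.hpp"; any cereal archive behaves the same way):
//
//   std::forward_list<int> list{1, 2, 3};
//   cereal::JSONOutputArchive archive(std::cout);
//   archive(list); // writes the size tag, then each element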
} // namespace cereal
#endif // CEREAL_TYPES_FORWARD_LIST_HPP_
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/common/const_pointer_container.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <type_traits>
namespace onnxruntime {
/**
Container has T* entries. e.g. std::vector<T*>, and this class provides const access to those
via iterators and direct access, as the standard behavior only makes the pointer constant,
and not what is pointed to, i.e. without this wrapper you get a const pointer to T, not a pointer to const T.
See https://stackoverflow.com/questions/8017036/understanding-const-iterator-with-pointers
*/
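/*
A minimal usage sketch (Node is a hypothetical type):
  std::vector<Node*> nodes = ...;
  ConstPointerContainer<std::vector<Node*>> view(nodes);
  for (const Node* n : view) {
    // n is a const Node*; mutating *n will not compile
  }
*/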
template <typename Container>
class ConstPointerContainer {
public:
using T = typename std::remove_pointer<typename Container::value_type>::type;
class ConstIterator {
public:
using const_iterator = typename Container::const_iterator;
using iterator_category = std::input_iterator_tag;
using value_type = T*;
using difference_type = std::ptrdiff_t;
using pointer = T**;
using reference = T*&;
/** Construct iterator for container that will return const T* entries.*/
explicit ConstIterator(const_iterator position) noexcept : current_{position}, item_{nullptr} {}
ConstIterator(const ConstIterator& other) = default;
ConstIterator& operator=(const ConstIterator& other) = default;
bool operator==(const ConstIterator& other) const noexcept { return current_ == other.current_; }
bool operator!=(const ConstIterator& other) const noexcept { return current_ != other.current_; }
ConstIterator& operator++() {
++current_;
return *this;
}
ConstIterator operator++(int) {
ConstIterator tmp{*this};
++(*this);
return tmp;
}
const T*& operator*() const {
item_ = *current_;
return item_;
}
const T** operator->() const { return &(operator*()); };
private:
const_iterator current_;
mutable const T* item_;
};
/**
Construct wrapper class that will provide const access to the pointers in a container of non-const pointers.
@param data Container with non-const pointers. e.g. std::vector<T*>
*/
explicit ConstPointerContainer(const Container& data) noexcept : data_(data) {}
size_t size() const noexcept { return data_.size(); }
bool empty() const noexcept { return data_.empty(); }
ConstIterator cbegin() const noexcept { return ConstIterator(data_.cbegin()); }
ConstIterator cend() const noexcept { return ConstIterator(data_.cend()); }
ConstIterator begin() const noexcept { return ConstIterator(data_.cbegin()); }
ConstIterator end() const noexcept { return ConstIterator(data_.cend()); }
const T* operator[](size_t index) const { return data_[index]; }
const T* at(size_t index) const {
ORT_ENFORCE(index < data_.size());
return data_[index];
}
private:
const Container& data_;
};
} // namespace onnxruntime
| 0 |
coqui_public_repos/inference-engine/third_party | coqui_public_repos/inference-engine/third_party/tensorflow/platform.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_PLATFORM_PLATFORM_DEFINE_H_
#define TENSORFLOW_PLATFORM_PLATFORM_DEFINE_H_
// Set one PLATFORM_* macro and set IS_MOBILE_PLATFORM if the platform is for
// mobile.
#if !defined(PLATFORM_POSIX) && !defined(PLATFORM_GOOGLE) && \
!defined(PLATFORM_POSIX_ANDROID) && !defined(PLATFORM_GOOGLE_ANDROID) && \
!defined(PLATFORM_WINDOWS)
// Choose which platform we are on.
#if defined(ANDROID) || defined(__ANDROID__)
#define PLATFORM_POSIX_ANDROID
#define IS_MOBILE_PLATFORM
#elif defined(__APPLE__)
#include "TargetConditionals.h"
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
#define PLATFORM_POSIX_IOS
#define IS_MOBILE_PLATFORM
#else
// If no platform specified, use:
#define PLATFORM_POSIX
#endif
#elif defined(_WIN32)
#define PLATFORM_WINDOWS
#elif defined(__arm__)
#define PLATFORM_POSIX
#elif defined(__EMSCRIPTEN__)
#define PLATFORM_PORTABLE_GOOGLE
#define PLATFORM_POSIX
// Require an outside macro to tell us if we're building for Raspberry Pi or
// another ARM device that's not a mobile platform.
#if !defined(RASPBERRY_PI) && !defined(ARM_NON_MOBILE)
#define IS_MOBILE_PLATFORM
#endif // !defined(RASPBERRY_PI) && !defined(ARM_NON_MOBILE)
#else
// If no platform specified, use:
#define PLATFORM_POSIX
#endif
#endif
// Look for both gcc/clang and Visual Studio macros indicating we're compiling
// for an x86 device.
#if defined(__x86_64__) || defined(__amd64__) || defined(_M_IX86) || \
defined(_M_X64)
#define PLATFORM_IS_X86
#endif
#endif // TENSORFLOW_PLATFORM_PLATFORM_DEFINE_H_
| 0 |
coqui_public_repos/coqpit | coqui_public_repos/coqpit/tests/test_parse_known_argparse.py | from dataclasses import asdict, dataclass, field
from typing import List
from coqpit.coqpit import Coqpit, check_argument
@dataclass
class SimplerConfig(Coqpit):
val_a: int = field(default=None, metadata={"help": "this is val_a"})
@dataclass
class SimpleConfig(Coqpit):
val_a: int = field(default=10, metadata={"help": "this is val_a of SimpleConfig"})
val_b: int = field(default=None, metadata={"help": "this is val_b"})
val_c: str = "Coqpit is great!"
mylist_with_default: List[SimplerConfig] = field(
default_factory=lambda: [SimplerConfig(val_a=100), SimplerConfig(val_a=999)],
metadata={"help": "list of SimplerConfig"},
)
def check_values(
self,
):
"""Check config fields"""
c = asdict(self)
check_argument("val_a", c, restricted=True, min_val=10, max_val=2056)
check_argument("val_b", c, restricted=True, min_val=128, max_val=4058, allow_none=True)
check_argument("val_c", c, restricted=True)
def test_parse_argparse():
unknown_args = ["--coqpit.arg_does_not_exist", "111"]
args = []
args.extend(["--coqpit.val_a", "222"])
args.extend(["--coqpit.val_b", "999"])
args.extend(["--coqpit.val_c", "this is different"])
args.extend(["--coqpit.mylist_with_default.0.val_a", "222"])
args.extend(["--coqpit.mylist_with_default.1.val_a", "111"])
args.extend(unknown_args)
# initial config
config = SimpleConfig()
print(config.pprint())
# reference config that we like to match with the config above
config_ref = SimpleConfig(
val_a=222,
val_b=999,
val_c="this is different",
mylist_with_default=[SimplerConfig(val_a=222), SimplerConfig(val_a=111)],
)
# create and init argparser with Coqpit
parser = config.init_argparse()
parser.print_help()
# parse the args with the parser
unknown = config.parse_known_args(args)
config.pprint()
# check the current config with the reference config
assert config == config_ref
assert unknown == unknown_args
def test_parse_edited_argparse():
"""calling `parse_known_argparse` after some modifications in the config values.
`parse_known_argparse` should keep the modified values if not defined in argv"""
unknown_args = ["--coqpit.arg_does_not_exist", "111"]
args = []
args.extend(["--coqpit.mylist_with_default.1.val_a", "111"])
args.extend(unknown_args)
# initial config with modified values
config = SimpleConfig()
config.val_a = 333
config.val_b = 444
config.val_c = "this is different"
config.mylist_with_default[0].val_a = 777
print(config.pprint())
# reference config that we like to match with the config above
config_ref = SimpleConfig(
val_a=333,
val_b=444,
val_c="this is different",
mylist_with_default=[SimplerConfig(val_a=777), SimplerConfig(val_a=111)],
)
# create and init argparser with Coqpit
parser = config.init_argparse()
parser.print_help()
# parse the args with the parser
unknown = config.parse_known_args(args)
config.pprint()
# check the current config with the reference config
assert config == config_ref
assert unknown == unknown_args
| 0 |
coqui_public_repos | coqui_public_repos/STT/ds_lib.supp | {
stt_tflite_error_reporter
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN6tflite20DefaultErrorReporterEv
fun:_ZN16TFLiteModelState4initEPKc
fun:STT_CreateModel
fun:main
}
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/util/sorted_uniform.hh | #ifndef UTIL_SORTED_UNIFORM_H
#define UTIL_SORTED_UNIFORM_H
#include <algorithm>
#include <cstddef>
#include <cassert>
#include <stdint.h>
namespace util {
template <class T> class IdentityAccessor {
public:
typedef T Key;
T operator()(const T *in) const { return *in; }
};
struct Pivot64 {
static inline std::size_t Calc(uint64_t off, uint64_t range, std::size_t width) {
std::size_t ret = static_cast<std::size_t>(static_cast<float>(off) / static_cast<float>(range) * static_cast<float>(width));
// Cap for floating point rounding
return (ret < width) ? ret : width - 1;
}
};
// Use when off * width is <2^64. This is guaranteed when each of them is actually a 32-bit value.
struct Pivot32 {
static inline std::size_t Calc(uint64_t off, uint64_t range, uint64_t width) {
return static_cast<std::size_t>((off * width) / (range + 1));
}
};
// Usage: PivotSelect<sizeof(DataType)>::T
template <unsigned> struct PivotSelect;
template <> struct PivotSelect<8> { typedef Pivot64 T; };
template <> struct PivotSelect<4> { typedef Pivot32 T; };
template <> struct PivotSelect<2> { typedef Pivot32 T; };
/* Binary search. */
template <class Iterator, class Accessor> bool BinaryFind(
const Accessor &accessor,
Iterator begin,
Iterator end,
const typename Accessor::Key key, Iterator &out) {
while (end > begin) {
Iterator pivot(begin + (end - begin) / 2);
typename Accessor::Key mid(accessor(pivot));
if (mid < key) {
begin = pivot + 1;
} else if (mid > key) {
end = pivot;
} else {
out = pivot;
return true;
}
}
return false;
}
// Search the range [before_it + 1, after_it - 1] for key.
// Preconditions:
// before_v <= key <= after_v
// before_v <= all values in the range [before_it + 1, after_it - 1] <= after_v
// range is sorted.
template <class Iterator, class Accessor, class Pivot> bool BoundedSortedUniformFind(
const Accessor &accessor,
Iterator before_it, typename Accessor::Key before_v,
Iterator after_it, typename Accessor::Key after_v,
const typename Accessor::Key key, Iterator &out) {
while (after_it - before_it > 1) {
Iterator pivot(before_it + (1 + Pivot::Calc(key - before_v, after_v - before_v, after_it - before_it - 1)));
typename Accessor::Key mid(accessor(pivot));
if (mid < key) {
before_it = pivot;
before_v = mid;
} else if (mid > key) {
after_it = pivot;
after_v = mid;
} else {
out = pivot;
return true;
}
}
return false;
}
template <class Iterator, class Accessor, class Pivot> bool SortedUniformFind(const Accessor &accessor, Iterator begin, Iterator end, const typename Accessor::Key key, Iterator &out) {
if (begin == end) return false;
typename Accessor::Key below(accessor(begin));
if (key <= below) {
if (key == below) { out = begin; return true; }
return false;
}
// Make the range [begin, end].
--end;
typename Accessor::Key above(accessor(end));
if (key >= above) {
if (key == above) { out = end; return true; }
return false;
}
return BoundedSortedUniformFind<Iterator, Accessor, Pivot>(accessor, begin, below, end, above, key, out);
}
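// A minimal usage sketch (assumes a sorted array of uint64_t keys):
//
//   const uint64_t *begin = ..., *end = ...;  // sorted range [begin, end)
//   const uint64_t *found;
//   if (SortedUniformFind<const uint64_t *, IdentityAccessor<uint64_t>, Pivot64>(
//       IdentityAccessor<uint64_t>(), begin, end, key, found)) {
//     // *found == key
//   }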
} // namespace util
#endif // UTIL_SORTED_UNIFORM_H
| 0 |
coqui_public_repos/STT-examples | coqui_public_repos/STT-examples/batch_processing/driver.py | import glob
import json
import os
from os.path import expanduser
import click
import delegator
# first loop over the files
# convert them to wave
# record things at 16000 Hz in the future or you get this:
# Warning: original sample rate (44100) is different than 16000hz. Resampling might produce erratic speech recognition.
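# Usage sketch (paths are illustrative):
#   python driver.py --dirname ~/podcasts --ext .mp3 \
#       --model model.tflite --scorer huge-vocab.scorer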
@click.command()
@click.option("--dirname", type=click.Path(exists=True, resolve_path=True))
@click.option("--ext", default=".mp3")
@click.option(
"--model",
default="model.tflite",
type=click.Path(exists=True, resolve_path=True),
)
@click.option(
"--scorer",
default="huge-vocab.scorer",
type=click.Path(exists=True, resolve_path=True),
)
# manage my library of podcasts
def main(dirname, ext, model, scorer):
print("main")
model = expanduser(model)
scorer = expanduser(scorer)
pattern = dirname + "/" + "*" + ext
audiorate = "16000"
print(pattern)
for filename in glob.glob(pattern):
print(filename)
wavefile = filename + ".wav"
convert_command = " ".join(
[
"ffmpeg",
"-i",
"'{}'".format(filename),
"-ar",
audiorate,
"'{}'".format(wavefile),
]
)
if not os.path.isfile(wavefile):
print(convert_command)
r = delegator.run(convert_command)
print(r.out)
else:
print("skipping wave conversion that exists")
command = " ".join(
[
"stt",
"--model",
model,
"--scorer",
scorer,
"--audio",
"'{}'".format(wavefile),
# "--extended",
"--json",
]
)
print(command)
r = delegator.run(command)
with open(filename + ".json", "w") as fo:
print(r.out)
fo.write(r.out)
if __name__ == "__main__":
main()
| 0 |
coqui_public_repos/TTS/TTS/tts/layers | coqui_public_repos/TTS/TTS/tts/layers/tortoise/autoregressive.py | # AGPL: a notification must be added stating that changes have been made to that file.
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from TTS.tts.layers.tortoise.arch_utils import AttentionBlock, TypicalLogitsWarper
def null_position_embeddings(range, dim):
return torch.zeros((range.shape[0], range.shape[1], dim), device=range.device)
def _p(t):
return t and (len(t), len(t[0]), t[0][0].shape) # kv_cache debug
class ResBlock(nn.Module):
"""
Basic residual convolutional block that uses GroupNorm.
"""
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv1d(chan, chan, kernel_size=3, padding=1),
nn.GroupNorm(chan // 8, chan),
nn.ReLU(),
nn.Conv1d(chan, chan, kernel_size=3, padding=1),
nn.GroupNorm(chan // 8, chan),
)
def forward(self, x):
return F.relu(self.net(x) + x)
class GPT2InferenceModel(GPT2PreTrainedModel):
def __init__(self, config, gpt, text_pos_emb, embeddings, norm, linear, kv_cache):
super().__init__(config)
self.transformer = gpt
self.text_pos_embedding = text_pos_emb
self.embeddings = embeddings
self.lm_head = nn.Sequential(norm, linear)
self.kv_cache = kv_cache
def store_mel_emb(self, mel_emb):
self.cached_mel_emb = mel_emb
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None) # usually None
if not self.kv_cache:
past_key_values = None
# only last token for inputs_ids if past is defined in kwargs
if past_key_values:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
assert self.cached_mel_emb is not None
assert inputs_embeds is None # Not supported by this inference model.
assert labels is None # Training not supported by this inference model.
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Create embedding
mel_len = self.cached_mel_emb.shape[1]
if input_ids.shape[1] != 1:
text_inputs = input_ids[:, mel_len:]
text_emb = self.embeddings(text_inputs)
text_emb = text_emb + self.text_pos_embedding(text_emb)
if self.cached_mel_emb.shape[0] != text_emb.shape[0]:
mel_emb = self.cached_mel_emb.repeat_interleave(text_emb.shape[0] // self.cached_mel_emb.shape[0], 0)
else: # this outcome only occurs once per loop in most cases
mel_emb = self.cached_mel_emb
emb = torch.cat([mel_emb, text_emb], dim=1)
else:
emb = self.embeddings(input_ids)
emb = emb + self.text_pos_embedding.get_fixed_embedding(
attention_mask.shape[1] - mel_len, attention_mask.device
)
transformer_outputs = self.transformer(
inputs_embeds=emb,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
if not return_dict:
return (lm_logits,) + transformer_outputs[1:]
return CausalLMOutputWithCrossAttentions(
loss=None,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@staticmethod
def _reorder_cache(past, beam_idx):
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
class ConditioningEncoder(nn.Module):
def __init__(
self,
spec_dim,
embedding_dim,
attn_blocks=6,
num_attn_heads=4,
do_checkpointing=False,
mean=False,
):
super().__init__()
attn = []
self.init = nn.Conv1d(spec_dim, embedding_dim, kernel_size=1)
for a in range(attn_blocks):
attn.append(AttentionBlock(embedding_dim, num_attn_heads))
self.attn = nn.Sequential(*attn)
self.dim = embedding_dim
self.do_checkpointing = do_checkpointing
self.mean = mean
def forward(self, x):
h = self.init(x)
h = self.attn(h)
if self.mean:
return h.mean(dim=2)
else:
return h[:, :, 0]
class LearnedPositionEmbeddings(nn.Module):
def __init__(self, seq_len, model_dim, init=0.02):
super().__init__()
self.emb = nn.Embedding(seq_len, model_dim)
# Initializing this way is standard for GPT-2
self.emb.weight.data.normal_(mean=0.0, std=init)
def forward(self, x):
sl = x.shape[1]
return self.emb(torch.arange(0, sl, device=x.device))
def get_fixed_embedding(self, ind, dev):
return self.emb(torch.arange(0, ind, device=dev))[ind - 1 : ind]
def build_hf_gpt_transformer(layers, model_dim, heads, max_mel_seq_len, max_text_seq_len, checkpointing):
"""
GPT-2 implemented by the HuggingFace library.
"""
from transformers import GPT2Config, GPT2Model
gpt_config = GPT2Config(
vocab_size=256, # Unused.
n_positions=max_mel_seq_len + max_text_seq_len,
n_ctx=max_mel_seq_len + max_text_seq_len,
n_embd=model_dim,
n_layer=layers,
n_head=heads,
gradient_checkpointing=checkpointing,
use_cache=not checkpointing,
)
gpt = GPT2Model(gpt_config)
# Override the built in positional embeddings
del gpt.wpe # TODO: figure out relevance in fixing exported model definition: Embedding(1012, 1024)
gpt.wpe = functools.partial(null_position_embeddings, dim=model_dim)
# Built-in token embeddings are unused.
del gpt.wte
return (
gpt,
LearnedPositionEmbeddings(max_mel_seq_len, model_dim),
LearnedPositionEmbeddings(max_text_seq_len, model_dim),
None,
None,
)
class MelEncoder(nn.Module):
def __init__(self, channels, mel_channels=80, resblocks_per_reduction=2):
super().__init__()
self.channels = channels
self.encoder = nn.Sequential(
nn.Conv1d(mel_channels, channels // 4, kernel_size=3, padding=1),
nn.Sequential(*[ResBlock(channels // 4) for _ in range(resblocks_per_reduction)]),
nn.Conv1d(channels // 4, channels // 2, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(channels // 16, channels // 2),
nn.ReLU(),
nn.Sequential(*[ResBlock(channels // 2) for _ in range(resblocks_per_reduction)]),
nn.Conv1d(channels // 2, channels, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(channels // 8, channels),
nn.ReLU(),
nn.Sequential(*[ResBlock(channels) for _ in range(resblocks_per_reduction)]),
)
self.reduction = 4
def forward(self, x):
for e in self.encoder:
x = e(x)
return x.permute(0, 2, 1)
class UnifiedVoice(nn.Module):
def __init__(
self,
layers=8,
model_dim=512,
heads=8,
max_text_tokens=120,
max_mel_tokens=250,
max_conditioning_inputs=1,
mel_length_compression=1024,
number_text_tokens=256,
start_text_token=None,
number_mel_codes=8194,
start_mel_token=8192,
stop_mel_token=8193,
train_solo_embeddings=False,
use_mel_codes_as_input=True,
checkpointing=True,
types=1,
):
"""
Args:
layers: Number of layers in transformer stack.
model_dim: Operating dimensions of the transformer
heads: Number of transformer heads. Must be divisible by model_dim. Recommend model_dim//64
max_text_tokens: Maximum number of text tokens that will be encountered by model.
max_mel_tokens: Maximum number of MEL tokens that will be encountered by model.
max_conditioning_inputs: Maximum number of conditioning inputs provided to the model. If (1), conditioning input can be of format (b,80,s), otherwise (b,n,80,s).
mel_length_compression: The factor between <number_input_samples> and <mel_tokens>. Used to compute MEL code padding given wav input length.
number_text_tokens:
start_text_token:
stop_text_token:
number_mel_codes:
start_mel_token:
stop_mel_token:
train_solo_embeddings:
use_mel_codes_as_input:
checkpointing:
"""
super().__init__()
self.number_text_tokens = number_text_tokens
self.start_text_token = number_text_tokens * types if start_text_token is None else start_text_token
self.stop_text_token = 0
self.number_mel_codes = number_mel_codes
self.start_mel_token = start_mel_token
self.stop_mel_token = stop_mel_token
self.layers = layers
self.heads = heads
self.max_mel_tokens = max_mel_tokens
self.max_text_tokens = max_text_tokens
self.model_dim = model_dim
self.max_conditioning_inputs = max_conditioning_inputs
self.mel_length_compression = mel_length_compression
self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads)
self.text_embedding = nn.Embedding(self.number_text_tokens * types + 1, model_dim)
if use_mel_codes_as_input:
self.mel_embedding = nn.Embedding(self.number_mel_codes, model_dim)
else:
self.mel_embedding = MelEncoder(model_dim, resblocks_per_reduction=1)
(
self.gpt,
self.mel_pos_embedding,
self.text_pos_embedding,
self.mel_layer_pos_embedding,
self.text_layer_pos_embedding,
) = build_hf_gpt_transformer(
layers,
model_dim,
heads,
self.max_mel_tokens + 2 + self.max_conditioning_inputs,
self.max_text_tokens + 2,
checkpointing,
)
if train_solo_embeddings:
self.mel_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * 0.02, requires_grad=True)
self.text_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * 0.02, requires_grad=True)
else:
self.mel_solo_embedding = 0
self.text_solo_embedding = 0
self.final_norm = nn.LayerNorm(model_dim)
self.text_head = nn.Linear(model_dim, self.number_text_tokens * types + 1)
self.mel_head = nn.Linear(model_dim, self.number_mel_codes)
# Initialize the embeddings per the GPT-2 scheme
embeddings = [self.text_embedding]
if use_mel_codes_as_input:
embeddings.append(self.mel_embedding)
for module in embeddings:
module.weight.data.normal_(mean=0.0, std=0.02)
def post_init_gpt2_config(self, kv_cache=True):
seq_length = self.max_mel_tokens + self.max_text_tokens + 2
gpt_config = GPT2Config(
vocab_size=self.max_mel_tokens,
n_positions=seq_length,
n_ctx=seq_length,
n_embd=self.model_dim,
n_layer=self.layers,
n_head=self.heads,
gradient_checkpointing=False,
use_cache=True,
)
self.inference_model = GPT2InferenceModel(
gpt_config,
self.gpt,
self.mel_pos_embedding,
self.mel_embedding,
self.final_norm,
self.mel_head,
kv_cache=kv_cache,
)
# self.inference_model = PrunedGPT2InferenceModel(gpt_config, self.gpt, self.mel_pos_embedding, self.mel_embedding, self.final_norm, self.mel_head)
self.gpt.wte = self.mel_embedding
# self.inference_model.save_pretrained("")
def build_aligned_inputs_and_targets(self, input, start_token, stop_token):
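# e.g. input [[5, 6]] with start/stop tokens s/e gives inp [[s, 5, 6]] and
# tar [[5, 6, e]]: the model consumes `inp` and is trained to predict `tar`.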
inp = F.pad(input, (1, 0), value=start_token)
tar = F.pad(input, (0, 1), value=stop_token)
return inp, tar
def set_mel_padding(self, mel_input_tokens, wav_lengths):
"""
Given mel tokens that are derived from a padded audio clip and the actual lengths of each batch element in
that audio clip, reformats the tokens with STOP_MEL_TOKEN in place of the zero padding. This is required
preformatting to create a working TTS model.
"""
# Set padding areas within MEL (currently it is coded with the MEL code for <zero>).
mel_lengths = torch.div(wav_lengths, self.mel_length_compression, rounding_mode="trunc")
for b in range(len(mel_lengths)):
actual_end = (
mel_lengths[b] + 1
) # Due to the convolutional nature of how these tokens are generated, it would be best if the model predicts a token past the actual last token.
if actual_end < mel_input_tokens.shape[-1]:
mel_input_tokens[b, actual_end:] = self.stop_mel_token
return mel_input_tokens
def get_logits(
self,
speech_conditioning_inputs,
first_inputs,
first_head,
second_inputs=None,
second_head=None,
get_attns=False,
return_latent=False,
):
if second_inputs is not None:
emb = torch.cat([speech_conditioning_inputs, first_inputs, second_inputs], dim=1)
else:
emb = torch.cat([speech_conditioning_inputs, first_inputs], dim=1)
gpt_out = self.gpt(inputs_embeds=emb, return_dict=True, output_attentions=get_attns)
if get_attns:
return gpt_out.attentions
enc = gpt_out.last_hidden_state[:, 1:] # The first logit is tied to the speech_conditioning_input
enc = self.final_norm(enc)
if return_latent:
return (
enc[
:,
speech_conditioning_inputs.shape[1] : speech_conditioning_inputs.shape[1] + first_inputs.shape[1],
],
enc[:, -second_inputs.shape[1] :],
)
first_logits = enc[:, : first_inputs.shape[1]]
first_logits = first_head(first_logits)
first_logits = first_logits.permute(0, 2, 1)
if second_inputs is not None:
second_logits = enc[:, -second_inputs.shape[1] :]
second_logits = second_head(second_logits)
second_logits = second_logits.permute(0, 2, 1)
return first_logits, second_logits
else:
return first_logits
def get_conditioning(self, speech_conditioning_input):
speech_conditioning_input = (
speech_conditioning_input.unsqueeze(1)
if len(speech_conditioning_input.shape) == 3
else speech_conditioning_input
)
conds = []
for j in range(speech_conditioning_input.shape[1]):
conds.append(self.conditioning_encoder(speech_conditioning_input[:, j]))
conds = torch.stack(conds, dim=1)
conds = conds.mean(dim=1)
return conds
def forward(
self,
speech_conditioning_latent,
text_inputs,
text_lengths,
mel_codes,
wav_lengths,
types=None,
text_first=True,
raw_mels=None,
return_attentions=False,
return_latent=False,
clip_inputs=True,
):
"""
Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode
(actuated by `text_first`).
speech_conditioning_input: MEL float tensor, (b,1024)
text_inputs: long tensor, (b,t)
text_lengths: long tensor, (b,)
mel_inputs: long tensor, (b,m)
wav_lengths: long tensor, (b,)
raw_mels: MEL float tensor (b,80,s)
If return_attentions is specified, only logits are returned.
If return_latent is specified, loss & logits are not computed or returned. Only the predicted latents are returned.
If clip_inputs is True, the inputs will be clipped to the smallest input size across each input modality.
"""
# Types are expressed by expanding the text embedding space.
if types is not None:
text_inputs = text_inputs * (1 + types).unsqueeze(-1)
if clip_inputs:
# This model will receive micro-batches with a ton of padding for both the text and MELs. Ameliorate this by
# chopping the inputs by the maximum actual length.
max_text_len = text_lengths.max()
text_inputs = text_inputs[:, :max_text_len]
max_mel_len = wav_lengths.max() // self.mel_length_compression
mel_codes = mel_codes[:, :max_mel_len]
if raw_mels is not None:
raw_mels = raw_mels[:, :, : max_mel_len * 4]
mel_codes = self.set_mel_padding(mel_codes, wav_lengths)
text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
mel_codes = F.pad(mel_codes, (0, 1), value=self.stop_mel_token)
conds = speech_conditioning_latent.unsqueeze(1)
text_inputs, text_targets = self.build_aligned_inputs_and_targets(
text_inputs, self.start_text_token, self.stop_text_token
)
text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
mel_codes, mel_targets = self.build_aligned_inputs_and_targets(
mel_codes, self.start_mel_token, self.stop_mel_token
)
if raw_mels is not None:
mel_inp = F.pad(raw_mels, (0, 8))
else:
mel_inp = mel_codes
mel_emb = self.mel_embedding(mel_inp)
mel_emb = mel_emb + self.mel_pos_embedding(mel_codes)
if text_first:
text_logits, mel_logits = self.get_logits(
conds,
text_emb,
self.text_head,
mel_emb,
self.mel_head,
get_attns=return_attentions,
return_latent=return_latent,
)
if return_latent:
return mel_logits[
:, :-2
] # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
else:
mel_logits, text_logits = self.get_logits(
conds,
mel_emb,
self.mel_head,
text_emb,
self.text_head,
get_attns=return_attentions,
return_latent=return_latent,
)
if return_latent:
return text_logits[
:, :-2
] # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
if return_attentions:
return mel_logits
loss_text = F.cross_entropy(text_logits, text_targets.long())
loss_mel = F.cross_entropy(mel_logits, mel_targets.long())
return loss_text.mean(), loss_mel.mean(), mel_logits
def inference_speech(
self,
speech_conditioning_latent,
text_inputs,
input_tokens=None,
num_return_sequences=1,
max_generate_length=None,
typical_sampling=False,
typical_mass=0.9,
**hf_generate_kwargs,
):
text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
text_inputs, text_targets = self.build_aligned_inputs_and_targets(
text_inputs, self.start_text_token, self.stop_text_token
)
text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
conds = speech_conditioning_latent.unsqueeze(1)
emb = torch.cat([conds, text_emb], dim=1)
self.inference_model.store_mel_emb(emb)
fake_inputs = torch.full(
(
emb.shape[0],
conds.shape[1] + emb.shape[1],
),
fill_value=1,
dtype=torch.long,
device=text_inputs.device,
)
fake_inputs[:, -1] = self.start_mel_token
trunc_index = fake_inputs.shape[1]
if input_tokens is None:
inputs = fake_inputs
else:
assert (
num_return_sequences % input_tokens.shape[0] == 0
), "The number of return sequences must be divisible by the number of input sequences"
fake_inputs = fake_inputs.repeat(num_return_sequences, 1)
input_tokens = input_tokens.repeat(num_return_sequences // input_tokens.shape[0], 1)
inputs = torch.cat([fake_inputs, input_tokens], dim=1)
logits_processor = (
LogitsProcessorList([TypicalLogitsWarper(mass=typical_mass)]) if typical_sampling else LogitsProcessorList()
) # TODO disable this
max_length = (
trunc_index + self.max_mel_tokens - 1 if max_generate_length is None else trunc_index + max_generate_length
)
gen = self.inference_model.generate(
inputs,
bos_token_id=self.start_mel_token,
pad_token_id=self.stop_mel_token,
eos_token_id=self.stop_mel_token,
max_length=max_length,
logits_processor=logits_processor,
num_return_sequences=num_return_sequences,
**hf_generate_kwargs,
)
return gen[:, trunc_index:]
if __name__ == "__main__":
gpt = UnifiedVoice(
model_dim=256,
heads=4,
train_solo_embeddings=True,
use_mel_codes_as_input=True,
max_conditioning_inputs=4,
)
l = gpt(
torch.randn(2, 3, 80, 800),
torch.randint(high=120, size=(2, 120)),
torch.tensor([32, 120]),
torch.randint(high=8192, size=(2, 250)),
torch.tensor([250 * 256, 195 * 256]),
)
gpt.text_forward(
torch.randn(2, 80, 800),
torch.randint(high=50, size=(2, 80)),
torch.tensor([32, 80]),
)
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/lib/util.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// FST utility definitions.
#include <fst/util.h>
#include <cctype>
#include <sstream>
#include <string>
#include <fst/flags.h>
#include <fst/log.h>
#include <fst/mapped-file.h>
// Utility flag definitions
DEFINE_bool(fst_error_fatal, true,
"FST errors are fatal; o.w. return objects flagged as bad: "
"e.g., FSTs: kError property set, FST weights: not a Member()");
namespace fst {
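// Splits a null-terminated string in place at any character found in delim,
// appending a pointer to each piece to vec. When omit_empty_strings is
// false, empty pieces produced by adjacent delimiters are kept.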
void SplitString(char *full, const char *delim, std::vector<char *> *vec,
bool omit_empty_strings) {
char *p = full;
while (p) {
if ((p = strpbrk(full, delim))) {
p[0] = '\0';
}
if (!omit_empty_strings || full[0] != '\0') vec->push_back(full);
if (p) full = p + 1;
}
}
int64_t StrToint64_t(const string &s, const string &src, size_t nline,
bool allow_negative, bool *error) {
int64_t n;
const char *cs = s.c_str();
char *p;
if (error) *error = false;
n = strtoll(cs, &p, 10);
if (p < cs + s.size() || (!allow_negative && n < 0)) {
FSTERROR() << "StrToint64_t: Bad integer = " << s << "\", source = " << src
<< ", line = " << nline;
if (error) *error = true;
return 0;
}
return n;
}
void ConvertToLegalCSymbol(string *s) {
for (auto it = s->begin(); it != s->end(); ++it) {
if (!isalnum(*it)) {
*it = '_';
}
}
}
// Skips over input characters to align the stream to
// MappedFile::kArchAlignment bytes. Returns false if the stream position
// cannot be determined.
bool AlignInput(std::istream &strm) {
char c;
for (int i = 0; i < MappedFile::kArchAlignment; ++i) {
int64_t pos = strm.tellg();
if (pos < 0) {
LOG(ERROR) << "AlignInput: Can't determine stream position";
return false;
}
if (pos % MappedFile::kArchAlignment == 0) break;
strm.read(&c, 1);
}
return true;
}
// Writes null output characters to align the stream to
// MappedFile::kArchAlignment bytes. Returns false if the stream position
// cannot be determined.
bool AlignOutput(std::ostream &strm) {
for (int i = 0; i < MappedFile::kArchAlignment; ++i) {
int64_t pos = strm.tellp();
if (pos < 0) {
LOG(ERROR) << "AlignOutput: Can't determine stream position";
return false;
}
if (pos % MappedFile::kArchAlignment == 0) break;
strm.write("", 1);
}
return true;
}
int AlignBufferWithOutputStream(std::ostream &strm,
std::ostringstream &buffer) {
const auto strm_pos = strm.tellp();
if (strm_pos == std::ostream::pos_type(-1)) {
LOG(ERROR) << "Cannot determine stream position";
return -1;
}
const int stream_offset = strm_pos % MappedFile::kArchAlignment;
for (int i = 0; i < stream_offset; ++i) buffer.write("", 1);
return stream_offset;
}
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/encode.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to encode and decode an FST.
#ifndef FST_ENCODE_H_
#define FST_ENCODE_H_
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fstream>
#include <fst/arc-map.h>
#include <fst/rmfinalepsilon.h>
namespace fst {
enum EncodeType { ENCODE = 1, DECODE = 2 };
static constexpr uint32_t kEncodeLabels = 0x0001;
static constexpr uint32_t kEncodeWeights = 0x0002;
static constexpr uint32_t kEncodeFlags = 0x0003;
namespace internal {
static constexpr uint32_t kEncodeHasISymbols = 0x0004;
static constexpr uint32_t kEncodeHasOSymbols = 0x0008;
// Identifies stream data as an encode table (and its endianness)
static const int32_t kEncodeMagicNumber = 2129983209;
// The following class encapsulates implementation details for the encoding and
// decoding of label/weight tuples used for encoding and decoding of FSTs. The
// EncodeTable is bidirectional: it stores both a mapping from the tuple of
// encoded labels and weights to a unique label, and the reverse mapping.
template <class Arc>
class EncodeTable {
public:
using Label = typename Arc::Label;
using Weight = typename Arc::Weight;
// Encoded data consists of arc input/output labels and arc weight.
struct Tuple {
Tuple() {}
Tuple(Label ilabel_, Label olabel_, Weight weight_)
: ilabel(ilabel_), olabel(olabel_), weight(std::move(weight_)) {}
Tuple(const Tuple &tuple)
: ilabel(tuple.ilabel),
olabel(tuple.olabel),
weight(std::move(tuple.weight)) {}
Label ilabel;
Label olabel;
Weight weight;
};
// Comparison object for hashing EncodeTable Tuple(s).
class TupleEqual {
public:
bool operator()(const Tuple *x, const Tuple *y) const {
return (x->ilabel == y->ilabel && x->olabel == y->olabel &&
x->weight == y->weight);
}
};
  // Hash function for EncodeTable Tuples. Based on the encode flags,
  // we hash the labels, the weights, or a combination of them.
class TupleKey {
public:
TupleKey() : encode_flags_(kEncodeLabels | kEncodeWeights) {}
TupleKey(const TupleKey &key) : encode_flags_(key.encode_flags_) {}
explicit TupleKey(uint32_t encode_flags) : encode_flags_(encode_flags) {}
size_t operator()(const Tuple *x) const {
size_t hash = x->ilabel;
static constexpr int lshift = 5;
static constexpr int rshift = CHAR_BIT * sizeof(size_t) - 5;
if (encode_flags_ & kEncodeLabels) {
hash = hash << lshift ^ hash >> rshift ^ x->olabel;
}
if (encode_flags_ & kEncodeWeights) {
hash = hash << lshift ^ hash >> rshift ^ x->weight.Hash();
}
return hash;
}
private:
int32_t encode_flags_;
};
explicit EncodeTable(uint32_t encode_flags)
: flags_(encode_flags), encode_hash_(1024, TupleKey(encode_flags)) {}
using EncodeHash = std::unordered_map<const Tuple *, Label, TupleKey,
TupleEqual>;
  // Given an arc, encodes its labels and/or weight (per the encode flags)
  // into a single label.
Label Encode(const Arc &arc) {
std::unique_ptr<Tuple> tuple(
new Tuple(arc.ilabel, flags_ & kEncodeLabels ? arc.olabel : 0,
flags_ & kEncodeWeights ? arc.weight : Weight::One()));
auto insert_result = encode_hash_.insert(
std::make_pair(tuple.get(), encode_tuples_.size() + 1));
if (insert_result.second) encode_tuples_.push_back(std::move(tuple));
return insert_result.first->second;
}
// Given an arc, looks up its encoded label or returns kNoLabel if not found.
Label GetLabel(const Arc &arc) const {
const Tuple tuple(arc.ilabel, flags_ & kEncodeLabels ? arc.olabel : 0,
flags_ & kEncodeWeights ? arc.weight : Weight::One());
auto it = encode_hash_.find(&tuple);
return (it == encode_hash_.end()) ? kNoLabel : it->second;
}
// Given an encoded arc label, decodes back to input/output labels and costs.
const Tuple *Decode(Label key) const {
if (key < 1 || key > encode_tuples_.size()) {
LOG(ERROR) << "EncodeTable::Decode: Unknown decode key: " << key;
return nullptr;
}
return encode_tuples_[key - 1].get();
}
size_t Size() const { return encode_tuples_.size(); }
bool Write(std::ostream &strm, const string &source) const;
static EncodeTable<Arc> *Read(std::istream &strm, const string &source);
uint32_t Flags() const { return flags_ & kEncodeFlags; }
const SymbolTable *InputSymbols() const { return isymbols_.get(); }
const SymbolTable *OutputSymbols() const { return osymbols_.get(); }
void SetInputSymbols(const SymbolTable *syms) {
if (syms) {
isymbols_.reset(syms->Copy());
flags_ |= kEncodeHasISymbols;
} else {
isymbols_.reset();
flags_ &= ~kEncodeHasISymbols;
}
}
void SetOutputSymbols(const SymbolTable *syms) {
if (syms) {
osymbols_.reset(syms->Copy());
flags_ |= kEncodeHasOSymbols;
} else {
osymbols_.reset();
flags_ &= ~kEncodeHasOSymbols;
}
}
private:
uint32_t flags_;
std::vector<std::unique_ptr<Tuple>> encode_tuples_;
EncodeHash encode_hash_;
std::unique_ptr<SymbolTable> isymbols_; // Pre-encoded input symbol table.
std::unique_ptr<SymbolTable> osymbols_; // Pre-encoded output symbol table.
EncodeTable(const EncodeTable &) = delete;
EncodeTable &operator=(const EncodeTable &) = delete;
};
template <class Arc>
bool EncodeTable<Arc>::Write(std::ostream &strm,
const string &source) const {
WriteType(strm, kEncodeMagicNumber);
WriteType(strm, flags_);
const int64_t size = encode_tuples_.size();
WriteType(strm, size);
for (const auto &tuple : encode_tuples_) {
WriteType(strm, tuple->ilabel);
WriteType(strm, tuple->olabel);
tuple->weight.Write(strm);
}
if (flags_ & kEncodeHasISymbols) isymbols_->Write(strm);
if (flags_ & kEncodeHasOSymbols) osymbols_->Write(strm);
strm.flush();
if (!strm) {
LOG(ERROR) << "EncodeTable::Write: Write failed: " << source;
return false;
}
return true;
}
template <class Arc>
EncodeTable<Arc> *EncodeTable<Arc>::Read(std::istream &strm,
const string &source) {
int32_t magic_number = 0;
ReadType(strm, &magic_number);
if (magic_number != kEncodeMagicNumber) {
LOG(ERROR) << "EncodeTable::Read: Bad encode table header: " << source;
return nullptr;
}
uint32_t flags;
ReadType(strm, &flags);
int64_t size;
ReadType(strm, &size);
if (!strm) {
LOG(ERROR) << "EncodeTable::Read: Read failed: " << source;
return nullptr;
}
std::unique_ptr<EncodeTable<Arc>> table(new EncodeTable<Arc>(flags));
for (int64_t i = 0; i < size; ++i) {
std::unique_ptr<Tuple> tuple(new Tuple());
ReadType(strm, &tuple->ilabel);
ReadType(strm, &tuple->olabel);
tuple->weight.Read(strm);
if (!strm) {
LOG(ERROR) << "EncodeTable::Read: Read failed: " << source;
return nullptr;
}
table->encode_tuples_.push_back(std::move(tuple));
table->encode_hash_[table->encode_tuples_.back().get()] =
table->encode_tuples_.size();
}
if (flags & kEncodeHasISymbols) {
table->isymbols_.reset(SymbolTable::Read(strm, source));
}
if (flags & kEncodeHasOSymbols) {
table->osymbols_.reset(SymbolTable::Read(strm, source));
}
return table.release();
}
} // namespace internal
// A mapper to encode/decode weighted transducers. Encoding of an FST is used
// for performing classical determinization or minimization on a weighted
// transducer viewing it as an unweighted acceptor over encoded labels.
//
// The mapper stores the encoding in a local hash table (EncodeTable). This
// table is shared (and reference-counted) between the encoder and decoder.
// A decoder has read-only access to the EncodeTable.
//
// The EncodeMapper allows on-the-fly encoding of the machine. As the
// EncodeTable is generated, the same table may be used to decode the machine
// on the fly. For example, in the following sequence of operations
//
// Encode -> Determinize -> Decode
//
// we will use the encoding table generated during the encode step in the
// decode, even though the encoding is not complete.
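//
// A minimal usage sketch (illustrative only; "ifst" is assumed to be a
// StdVectorFst built elsewhere, and Determinize() requires
// <fst/determinize.h>):
//
//   EncodeMapper<StdArc> mapper(kEncodeLabels | kEncodeWeights, ENCODE);
//   Encode(&ifst, &mapper);   // ifst is now an unweighted acceptor.
//   StdVectorFst det;
//   Determinize(ifst, &det);
//   Decode(&det, mapper);     // Restores the original labels and weights.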
template <class Arc>
class EncodeMapper {
using Label = typename Arc::Label;
using Weight = typename Arc::Weight;
public:
EncodeMapper(uint32_t flags, EncodeType type)
: flags_(flags),
type_(type),
table_(std::make_shared<internal::EncodeTable<Arc>>(flags)),
error_(false) {}
EncodeMapper(const EncodeMapper &mapper)
: flags_(mapper.flags_),
type_(mapper.type_),
table_(mapper.table_),
error_(false) {}
// Copy constructor but setting the type, typically to DECODE.
EncodeMapper(const EncodeMapper &mapper, EncodeType type)
: flags_(mapper.flags_),
type_(type),
table_(mapper.table_),
error_(mapper.error_) {}
Arc operator()(const Arc &arc);
MapFinalAction FinalAction() const {
return (type_ == ENCODE && (flags_ & kEncodeWeights))
? MAP_REQUIRE_SUPERFINAL
: MAP_NO_SUPERFINAL;
}
constexpr MapSymbolsAction InputSymbolsAction() const {
return MAP_CLEAR_SYMBOLS;
}
constexpr MapSymbolsAction OutputSymbolsAction() const {
return MAP_CLEAR_SYMBOLS;
}
uint64_t Properties(uint64_t inprops) {
uint64_t outprops = inprops;
if (error_) outprops |= kError;
uint64_t mask = kFstProperties;
if (flags_ & kEncodeLabels) {
mask &= kILabelInvariantProperties & kOLabelInvariantProperties;
}
if (flags_ & kEncodeWeights) {
mask &= kILabelInvariantProperties & kWeightInvariantProperties &
(type_ == ENCODE ? kAddSuperFinalProperties
: kRmSuperFinalProperties);
}
return outprops & mask;
}
uint32_t Flags() const { return flags_; }
EncodeType Type() const { return type_; }
bool Write(std::ostream &strm, const string &source) const {
return table_->Write(strm, source);
}
bool Write(const string &filename) const {
std::ofstream strm(filename,
std::ios_base::out | std::ios_base::binary);
if (!strm) {
LOG(ERROR) << "EncodeMap: Can't open file: " << filename;
return false;
}
return Write(strm, filename);
}
static EncodeMapper<Arc> *Read(std::istream &strm, const string &source,
EncodeType type = ENCODE) {
auto *table = internal::EncodeTable<Arc>::Read(strm, source);
return table ? new EncodeMapper(table->Flags(), type, table) : nullptr;
}
static EncodeMapper<Arc> *Read(const string &filename,
EncodeType type = ENCODE) {
std::ifstream strm(filename,
std::ios_base::in | std::ios_base::binary);
if (!strm) {
LOG(ERROR) << "EncodeMap: Can't open file: " << filename;
return nullptr;
}
return Read(strm, filename, type);
}
const SymbolTable *InputSymbols() const { return table_->InputSymbols(); }
const SymbolTable *OutputSymbols() const { return table_->OutputSymbols(); }
void SetInputSymbols(const SymbolTable *syms) {
table_->SetInputSymbols(syms);
}
void SetOutputSymbols(const SymbolTable *syms) {
table_->SetOutputSymbols(syms);
}
private:
uint32_t flags_;
EncodeType type_;
std::shared_ptr<internal::EncodeTable<Arc>> table_;
bool error_;
explicit EncodeMapper(uint32_t flags, EncodeType type,
internal::EncodeTable<Arc> *table)
: flags_(flags), type_(type), table_(table), error_(false) {}
EncodeMapper &operator=(const EncodeMapper &) = delete;
};
template <class Arc>
Arc EncodeMapper<Arc>::operator()(const Arc &arc) {
if (type_ == ENCODE) {
if ((arc.nextstate == kNoStateId && !(flags_ & kEncodeWeights)) ||
(arc.nextstate == kNoStateId && (flags_ & kEncodeWeights) &&
arc.weight == Weight::Zero())) {
return arc;
} else {
const auto label = table_->Encode(arc);
return Arc(label, flags_ & kEncodeLabels ? label : arc.olabel,
flags_ & kEncodeWeights ? Weight::One() : arc.weight,
arc.nextstate);
}
} else { // type_ == DECODE
if (arc.nextstate == kNoStateId) {
return arc;
} else {
if (arc.ilabel == 0) return arc;
if (flags_ & kEncodeLabels && arc.ilabel != arc.olabel) {
FSTERROR() << "EncodeMapper: Label-encoded arc has different "
"input and output labels";
error_ = true;
}
if (flags_ & kEncodeWeights && arc.weight != Weight::One()) {
FSTERROR() << "EncodeMapper: Weight-encoded arc has non-trivial weight";
error_ = true;
}
const auto tuple = table_->Decode(arc.ilabel);
if (!tuple) {
FSTERROR() << "EncodeMapper: Decode failed";
error_ = true;
return Arc(kNoLabel, kNoLabel, Weight::NoWeight(), arc.nextstate);
} else {
return Arc(tuple->ilabel,
flags_ & kEncodeLabels ? tuple->olabel : arc.olabel,
flags_ & kEncodeWeights ? tuple->weight : arc.weight,
arc.nextstate);
}
}
}
}
// Complexity: O(E + V).
template <class Arc>
inline void Encode(MutableFst<Arc> *fst, EncodeMapper<Arc> *mapper) {
mapper->SetInputSymbols(fst->InputSymbols());
mapper->SetOutputSymbols(fst->OutputSymbols());
ArcMap(fst, mapper);
}
template <class Arc>
inline void Decode(MutableFst<Arc> *fst, const EncodeMapper<Arc> &mapper) {
ArcMap(fst, EncodeMapper<Arc>(mapper, DECODE));
RmFinalEpsilon(fst);
fst->SetInputSymbols(mapper.InputSymbols());
fst->SetOutputSymbols(mapper.OutputSymbols());
}
// On-the-fly encoding of an input FST.
//
// Complexity:
//
// Construction: O(1)
// Traversal: O(e + v)
//
// where e is the number of arcs visited and v is the number of states visited.
// Constant time and space to visit an input state or arc is assumed and
// exclusive of caching.
template <class Arc>
class EncodeFst : public ArcMapFst<Arc, Arc, EncodeMapper<Arc>> {
public:
using Mapper = EncodeMapper<Arc>;
using Impl = internal::ArcMapFstImpl<Arc, Arc, Mapper>;
EncodeFst(const Fst<Arc> &fst, Mapper *encoder)
: ArcMapFst<Arc, Arc, Mapper>(fst, encoder, ArcMapFstOptions()) {
encoder->SetInputSymbols(fst.InputSymbols());
encoder->SetOutputSymbols(fst.OutputSymbols());
}
EncodeFst(const Fst<Arc> &fst, const Mapper &encoder)
: ArcMapFst<Arc, Arc, Mapper>(fst, encoder, ArcMapFstOptions()) {}
// See Fst<>::Copy() for doc.
EncodeFst(const EncodeFst<Arc> &fst, bool copy = false)
: ArcMapFst<Arc, Arc, Mapper>(fst, copy) {}
// Makes a copy of this EncodeFst. See Fst<>::Copy() for further doc.
EncodeFst<Arc> *Copy(bool safe = false) const override {
if (safe) {
FSTERROR() << "EncodeFst::Copy(true): Not allowed";
GetImpl()->SetProperties(kError, kError);
}
return new EncodeFst(*this);
}
private:
using ImplToFst<Impl>::GetImpl;
using ImplToFst<Impl>::GetMutableImpl;
};
// On-the-fly decoding of an input FST.
//
// Complexity:
//
// Construction: O(1).
// Traversal: O(e + v)
//
// Constant time and space to visit an input state or arc is assumed and
// exclusive of caching.
template <class Arc>
class DecodeFst : public ArcMapFst<Arc, Arc, EncodeMapper<Arc>> {
public:
using Mapper = EncodeMapper<Arc>;
using Impl = internal::ArcMapFstImpl<Arc, Arc, Mapper>;
using ImplToFst<Impl>::GetImpl;
DecodeFst(const Fst<Arc> &fst, const Mapper &encoder)
: ArcMapFst<Arc, Arc, Mapper>(fst, Mapper(encoder, DECODE),
ArcMapFstOptions()) {
GetMutableImpl()->SetInputSymbols(encoder.InputSymbols());
GetMutableImpl()->SetOutputSymbols(encoder.OutputSymbols());
}
// See Fst<>::Copy() for doc.
DecodeFst(const DecodeFst<Arc> &fst, bool safe = false)
: ArcMapFst<Arc, Arc, Mapper>(fst, safe) {}
// Makes a copy of this DecodeFst. See Fst<>::Copy() for further doc.
DecodeFst<Arc> *Copy(bool safe = false) const override {
return new DecodeFst(*this, safe);
}
private:
using ImplToFst<Impl>::GetMutableImpl;
};
// Specialization for EncodeFst.
template <class Arc>
class StateIterator<EncodeFst<Arc>>
: public StateIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>> {
public:
explicit StateIterator(const EncodeFst<Arc> &fst)
: StateIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>>(fst) {}
};
// Specialization for EncodeFst.
template <class Arc>
class ArcIterator<EncodeFst<Arc>>
: public ArcIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>> {
public:
ArcIterator(const EncodeFst<Arc> &fst, typename Arc::StateId s)
: ArcIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>>(fst, s) {}
};
// Specialization for DecodeFst.
template <class Arc>
class StateIterator<DecodeFst<Arc>>
: public StateIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>> {
public:
explicit StateIterator(const DecodeFst<Arc> &fst)
: StateIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>>(fst) {}
};
// Specialization for DecodeFst.
template <class Arc>
class ArcIterator<DecodeFst<Arc>>
: public ArcIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>> {
public:
ArcIterator(const DecodeFst<Arc> &fst, typename Arc::StateId s)
: ArcIterator<ArcMapFst<Arc, Arc, EncodeMapper<Arc>>>(fst, s) {}
};
// Useful aliases when using StdArc.
using StdEncodeFst = EncodeFst<StdArc>;
using StdDecodeFst = DecodeFst<StdArc>;
} // namespace fst
#endif // FST_ENCODE_H_
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/framework/customregistry.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/status.h"
#include "core/common/logging/logging.h"
#include "core/framework/op_kernel.h"
#include "core/framework/kernel_def_builder.h"
#include "core/framework/kernel_registry.h"
#if !defined(ORT_MINIMAL_BUILD)
#include "core/graph/schema_registry.h"
#endif
namespace onnxruntime {
/**
Represents a registry that contains both custom kernels and custom schemas.
*/
class CustomRegistry final {
public:
CustomRegistry()
: kernel_registry_(std::make_shared<KernelRegistry>())
#if !defined(ORT_MINIMAL_BUILD)
,
opschema_registry_(std::make_shared<onnxruntime::OnnxRuntimeOpSchemaRegistry>())
#endif
{
}
/**
* Register a kernel definition together with kernel factory method to this session.
   * If any conflict happens between a registered kernel def and a built-in
   * kernel def, the registered kernel will have higher priority.
* Call this before invoking Initialize().
* @return OK if success.
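   *
   * Illustrative sketch (the op name, domain, and create_fn are assumptions
   * for the example, not part of this header):
   *
   *   CustomRegistry registry;
   *   KernelDefBuilder builder;
   *   builder.SetName("MyCustomOp").SetDomain("custom.domain").SinceVersion(1);
   *   ORT_RETURN_IF_ERROR(registry.RegisterCustomKernel(builder, create_fn));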
*/
common::Status RegisterCustomKernel(KernelDefBuilder& kernel_def_builder, const KernelCreateFn& kernel_creator);
common::Status RegisterCustomKernel(KernelCreateInfo&);
const std::shared_ptr<KernelRegistry>& GetKernelRegistry();
#if !defined(ORT_MINIMAL_BUILD)
common::Status RegisterOpSet(std::vector<ONNX_NAMESPACE::OpSchema>& schemas, const std::string& domain,
int baseline_opset_version, int opset_version);
const std::shared_ptr<onnxruntime::OnnxRuntimeOpSchemaRegistry>& GetOpschemaRegistry();
#endif
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CustomRegistry);
std::shared_ptr<KernelRegistry> kernel_registry_;
#if !defined(ORT_MINIMAL_BUILD)
std::shared_ptr<onnxruntime::OnnxRuntimeOpSchemaRegistry> opschema_registry_;
#endif
};
} // namespace onnxruntime
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/map.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Compatibility file for old-style Map() functions and MapFst class that have
// been renamed to ArcMap (cf. StateMap).
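//
// A minimal sketch of the old-style API this header preserves (illustrative):
//
//   StdVectorFst fst = ...;
//   Map(&fst, IdentityMapper<StdArc>());  // Equivalent to ArcMap().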
#ifndef FST_MAP_H_
#define FST_MAP_H_
#include <fst/arc-map.h>
namespace fst {
template <class A, class C>
void Map(MutableFst<A> *fst, C *mapper) {
ArcMap(fst, mapper);
}
template <class A, class C>
void Map(MutableFst<A> *fst, C mapper) {
ArcMap(fst, mapper);
}
template <class A, class B, class C>
void Map(const Fst<A> &ifst, MutableFst<B> *ofst, C *mapper) {
ArcMap(ifst, ofst, mapper);
}
template <class A, class B, class C>
void Map(const Fst<A> &ifst, MutableFst<B> *ofst, C mapper) {
ArcMap(ifst, ofst, mapper);
}
using MapFstOptions = ArcMapFstOptions;
template <class A, class B, class C>
class MapFst : public ArcMapFst<A, B, C> {
public:
using FromArc = A;
using ToArc = B;
using StateId = typename ToArc::StateId;
using Weight = typename ToArc::Weight;
using State = CacheState<B>;
MapFst(const Fst<A> &fst, const C &mapper, const MapFstOptions &opts)
: ArcMapFst<A, B, C>(fst, mapper, opts) {}
MapFst(const Fst<A> &fst, C *mapper, const MapFstOptions &opts)
: ArcMapFst<A, B, C>(fst, mapper, opts) {}
MapFst(const Fst<A> &fst, const C &mapper)
: ArcMapFst<A, B, C>(fst, mapper) {}
MapFst(const Fst<A> &fst, C *mapper) : ArcMapFst<A, B, C>(fst, mapper) {}
// See Fst<>::Copy() for doc.
MapFst(const MapFst<A, B, C> &fst, bool safe = false)
: ArcMapFst<A, B, C>(fst, safe) {}
// Get a copy of this MapFst. See Fst<>::Copy() for further doc.
MapFst<A, B, C> *Copy(bool safe = false) const override {
return new MapFst(*this, safe);
}
};
// Specialization for MapFst.
template <class A, class B, class C>
class StateIterator<MapFst<A, B, C>>
: public StateIterator<ArcMapFst<A, B, C>> {
public:
explicit StateIterator(const ArcMapFst<A, B, C> &fst)
: StateIterator<ArcMapFst<A, B, C>>(fst) {}
};
// Specialization for MapFst.
template <class A, class B, class C>
class ArcIterator<MapFst<A, B, C>> : public ArcIterator<ArcMapFst<A, B, C>> {
public:
ArcIterator(const ArcMapFst<A, B, C> &fst, typename A::StateId s)
: ArcIterator<ArcMapFst<A, B, C>>(fst, s) {}
};
// For backwards compatibility only; use IdentityArcMapper otherwise.
template <class A>
struct IdentityMapper {
using FromArc = A;
using ToArc = A;
ToArc operator()(const FromArc &arc) const { return arc; }
constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; }
constexpr MapSymbolsAction InputSymbolsAction() const {
return MAP_COPY_SYMBOLS;
}
constexpr MapSymbolsAction OutputSymbolsAction() const {
return MAP_COPY_SYMBOLS;
}
uint64 Properties(uint64 props) const { return props; }
};
} // namespace fst
#endif // FST_MAP_H_
| 0 |
coqui_public_repos/TTS/TTS/tts | coqui_public_repos/TTS/TTS/tts/configs/neuralhmm_tts_config.py | from dataclasses import dataclass, field
from typing import List
from TTS.tts.configs.shared_configs import BaseTTSConfig
@dataclass
class NeuralhmmTTSConfig(BaseTTSConfig):
"""
Define parameters for Neural HMM TTS model.
Example:
        >>> from TTS.tts.configs.neuralhmm_tts_config import NeuralhmmTTSConfig
        >>> config = NeuralhmmTTSConfig()
Args:
model (str):
            Model name used to select the right model class to initialize. Defaults to `NeuralHMM_TTS`.
run_eval_steps (int):
            Run an evaluation epoch every N steps. If None, waits until the training epoch is completed. Defaults to 100.
save_step (int):
Save local checkpoint every save_step steps. Defaults to 500.
plot_step (int):
Plot training stats on the logger every plot_step steps. Defaults to 1.
model_param_stats (bool):
Log model parameters stats on the logger dashboard. Defaults to False.
force_generate_statistics (bool):
Force generate mel normalization statistics. Defaults to False.
mel_statistics_parameter_path (str):
            Path to the mel normalization statistics. If the model doesn't find a file there, it will generate statistics.
Defaults to None.
num_chars (int):
Number of characters used by the model. It must be defined before initializing the model. Defaults to None.
state_per_phone (int):
            Generates N states per phone. Similar to the `add_blank` parameter in GlowTTS, but here it is upsampled by the model's encoder. Defaults to 2.
encoder_in_out_features (int):
Channels of encoder input and character embedding tensors. Defaults to 512.
encoder_n_convolutions (int):
Number of convolution layers in the encoder. Defaults to 3.
out_channels (int):
            Channels of the final model output. It must match the spectrogram size. Defaults to 80.
ar_order (int):
            Autoregressive order of the model. Defaults to 1. Ablations of Neural HMM found that more autoregression, while giving more variation, hurts the naturalness of the synthesised audio.
sampling_temp (float):
            Variation added to the sample from the latent space of neural HMM. Defaults to 0.
deterministic_transition (bool):
            Deterministic duration generation based on duration quantiles as defined in "S. Ronanki, O. Watts, S. King, and G. E. Henter, “Median-based generation of synthetic speech durations using a non-parametric approach,” in Proc. SLT, 2016.". Defaults to True.
duration_threshold (float):
            Threshold for duration quantiles. Defaults to 0.43. Tune this to change the speaking rate of the synthesis, where lower values define a slower speaking rate and higher values a faster one.
use_grad_checkpointing (bool):
            Use gradient checkpointing to save memory. In a multi-GPU setting PyTorch currently does not support gradient checkpointing inside a loop, so it has to be turned off there. Adjust depending on whichever gives the larger batch size, a single GPU or multiple GPUs. Defaults to True.
max_sampling_time (int):
Maximum sampling time while synthesising latents from neural HMM. Defaults to 1000.
prenet_type (str):
`original` or `bn`. `original` sets the default Prenet and `bn` uses Batch Normalization version of the
Prenet. Defaults to `original`.
prenet_dim (int):
Dimension of the Prenet. Defaults to 256.
prenet_n_layers (int):
Number of layers in the Prenet. Defaults to 2.
prenet_dropout (float):
Dropout rate of the Prenet. Defaults to 0.5.
prenet_dropout_at_inference (bool):
            Use dropout at inference time. Defaults to True.
memory_rnn_dim (int):
Dimension of the memory LSTM to process the prenet output. Defaults to 1024.
outputnet_size (list[int]):
Size of the output network inside the neural HMM. Defaults to [1024].
flat_start_params (dict):
Parameters for the flat start initialization of the neural HMM. Defaults to `{"mean": 0.0, "std": 1.0, "transition_p": 0.14}`.
It will be recomputed when you pass the dataset.
std_floor (float):
            Floor value for the standard deviation of the neural HMM. Prevents the model from cheating by putting a point mass and getting infinite likelihood at any datapoint. Defaults to 0.001.
It is called `variance flooring` in standard HMM literature.
optimizer (str):
Optimizer to use for training. Defaults to `adam`.
optimizer_params (dict):
Parameters for the optimizer. Defaults to `{"weight_decay": 1e-6}`.
grad_clip (float):
Gradient clipping threshold. Defaults to 40_000.
lr (float):
Learning rate. Defaults to 1e-3.
lr_scheduler (str):
Learning rate scheduler for the training. Use one from `torch.optim.Scheduler` schedulers or
`TTS.utils.training`. Defaults to `None`.
min_seq_len (int):
Minimum input sequence length to be used at training.
max_seq_len (int):
Maximum input sequence length to be used at training. Larger values result in more VRAM usage.
"""
model: str = "NeuralHMM_TTS"
# Training and Checkpoint configs
run_eval_steps: int = 100
save_step: int = 500
plot_step: int = 1
model_param_stats: bool = False
# data parameters
force_generate_statistics: bool = False
mel_statistics_parameter_path: str = None
# Encoder parameters
num_chars: int = None
state_per_phone: int = 2
encoder_in_out_features: int = 512
encoder_n_convolutions: int = 3
# HMM parameters
out_channels: int = 80
ar_order: int = 1
sampling_temp: float = 0
deterministic_transition: bool = True
duration_threshold: float = 0.43
use_grad_checkpointing: bool = True
max_sampling_time: int = 1000
## Prenet parameters
prenet_type: str = "original"
prenet_dim: int = 256
prenet_n_layers: int = 2
prenet_dropout: float = 0.5
prenet_dropout_at_inference: bool = True
memory_rnn_dim: int = 1024
## Outputnet parameters
outputnet_size: List[int] = field(default_factory=lambda: [1024])
flat_start_params: dict = field(default_factory=lambda: {"mean": 0.0, "std": 1.0, "transition_p": 0.14})
std_floor: float = 0.001
# optimizer parameters
optimizer: str = "Adam"
optimizer_params: dict = field(default_factory=lambda: {"weight_decay": 1e-6})
grad_clip: float = 40000.0
lr: float = 1e-3
lr_scheduler: str = None
# overrides
min_text_len: int = 10
max_text_len: int = 500
min_audio_len: int = 512
# testing
test_sentences: List[str] = field(
default_factory=lambda: [
"Be a voice, not an echo.",
]
)
# Extra needed config
r: int = 1
use_d_vector_file: bool = False
use_speaker_embedding: bool = False
def check_values(self):
"""Validate the hyperparameters.
Raises:
AssertionError: when the parameters network is not defined
AssertionError: transition probability is not between 0 and 1
"""
assert self.ar_order > 0, "AR order must be greater than 0 it is an autoregressive model."
assert (
len(self.outputnet_size) >= 1
), f"Parameter Network must have atleast one layer check the config file for parameter network. Provided: {self.parameternetwork}"
assert (
0 < self.flat_start_params["transition_p"] < 1
), f"Transition probability must be between 0 and 1. Provided: {self.flat_start_params['transition_p']}"
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm/lm | coqui_public_repos/inference-engine/third_party/kenlm/lm/interpolate/tune_instances_test.cc | #include "lm/interpolate/tune_instances.hh"
#include "util/file.hh"
#include "util/file_stream.hh"
#include "util/stream/chain.hh"
#include "util/stream/config.hh"
#include "util/stream/typed_stream.hh"
#include "util/string_piece.hh"
#define BOOST_TEST_MODULE InstanceTest
#include <boost/test/unit_test.hpp>
#include <vector>
#include <math.h>
namespace lm { namespace interpolate { namespace {
BOOST_AUTO_TEST_CASE(Toy) {
util::scoped_fd test_input(util::MakeTemp("temporary"));
util::FileStream(test_input.get()) << "c\n";
StringPiece dir("../common/test_data/");
if (boost::unit_test::framework::master_test_suite().argc == 2) {
StringPiece zero_file(boost::unit_test::framework::master_test_suite().argv[1]);
BOOST_REQUIRE(zero_file.size() > strlen("toy0.1"));
BOOST_REQUIRE_EQUAL("toy0.1", StringPiece(zero_file.data() + zero_file.size() - 6, 6));
dir = StringPiece(zero_file.data(), zero_file.size() - 6);
}
std::vector<StringPiece> model_names;
std::string full0 = std::string(dir.data(), dir.size()) + "toy0";
std::string full1 = std::string(dir.data(), dir.size()) + "toy1";
model_names.push_back(full0);
model_names.push_back(full1);
// Tiny buffer sizes.
InstancesConfig config;
config.model_read_chain_mem = 100;
config.extension_write_chain_mem = 100;
config.lazy_memory = 100;
config.sort.temp_prefix = "temporary";
config.sort.buffer_size = 100;
config.sort.total_memory = 1024;
util::SeekOrThrow(test_input.get(), 0);
Instances inst(test_input.release(), model_names, config);
BOOST_CHECK_EQUAL(1, inst.BOS());
const Matrix &ln_unigrams = inst.LNUnigrams();
// <unk>=0
BOOST_CHECK_CLOSE(-0.90309 * M_LN10, ln_unigrams(0, 0), 0.001);
BOOST_CHECK_CLOSE(-1 * M_LN10, ln_unigrams(0, 1), 0.001);
// <s>=1 doesn't matter as long as it doesn't cause NaNs.
BOOST_CHECK(!isnan(ln_unigrams(1, 0)));
BOOST_CHECK(!isnan(ln_unigrams(1, 1)));
// a = 2
BOOST_CHECK_CLOSE(-0.46943438 * M_LN10, ln_unigrams(2, 0), 0.001);
BOOST_CHECK_CLOSE(-0.6146491 * M_LN10, ln_unigrams(2, 1), 0.001);
// </s> = 3
BOOST_CHECK_CLOSE(-0.5720968 * M_LN10, ln_unigrams(3, 0), 0.001);
BOOST_CHECK_CLOSE(-0.6146491 * M_LN10, ln_unigrams(3, 1), 0.001);
// c = 4
BOOST_CHECK_CLOSE(-0.90309 * M_LN10, ln_unigrams(4, 0), 0.001); // <unk>
BOOST_CHECK_CLOSE(-0.7659168 * M_LN10, ln_unigrams(4, 1), 0.001);
// too lazy to do b = 5.
// Two instances:
// <s> predicts c
// <s> c predicts </s>
BOOST_REQUIRE_EQUAL(2, inst.NumInstances());
BOOST_CHECK_CLOSE(-0.30103 * M_LN10, inst.LNBackoffs(0)(0), 0.001);
BOOST_CHECK_CLOSE(-0.30103 * M_LN10, inst.LNBackoffs(0)(1), 0.001);
// Backoffs of <s> c
BOOST_CHECK_CLOSE(0.0, inst.LNBackoffs(1)(0), 0.001);
BOOST_CHECK_CLOSE((-0.30103 - 0.30103) * M_LN10, inst.LNBackoffs(1)(1), 0.001);
util::stream::Chain extensions(util::stream::ChainConfig(inst.ReadExtensionsEntrySize(), 2, 300));
inst.ReadExtensions(extensions);
util::stream::TypedStream<Extension> stream(extensions.Add());
extensions >> util::stream::kRecycle;
// The extensions are (in order of instance, vocab id, and model as they should be sorted):
// <s> a from both models 0 and 1 (so two instances)
// <s> c from model 1
// <s> b from model 0
// c </s> from model 1
// Magic probabilities come from querying the models directly.
// <s> a from model 0
BOOST_REQUIRE(stream);
BOOST_CHECK_EQUAL(0, stream->instance);
BOOST_CHECK_EQUAL(2 /* a */, stream->word);
BOOST_CHECK_EQUAL(0, stream->model);
BOOST_CHECK_CLOSE(-0.37712017 * M_LN10, stream->ln_prob, 0.001);
// <s> a from model 1
BOOST_REQUIRE(++stream);
BOOST_CHECK_EQUAL(0, stream->instance);
BOOST_CHECK_EQUAL(2 /* a */, stream->word);
BOOST_CHECK_EQUAL(1, stream->model);
BOOST_CHECK_CLOSE(-0.4301247 * M_LN10, stream->ln_prob, 0.001);
// <s> c from model 1
BOOST_REQUIRE(++stream);
BOOST_CHECK_EQUAL(0, stream->instance);
BOOST_CHECK_EQUAL(4 /* c */, stream->word);
BOOST_CHECK_EQUAL(1, stream->model);
BOOST_CHECK_CLOSE(-0.4740302 * M_LN10, stream->ln_prob, 0.001);
// <s> b from model 0
BOOST_REQUIRE(++stream);
BOOST_CHECK_EQUAL(0, stream->instance);
BOOST_CHECK_EQUAL(5 /* b */, stream->word);
BOOST_CHECK_EQUAL(0, stream->model);
BOOST_CHECK_CLOSE(-0.41574955 * M_LN10, stream->ln_prob, 0.001);
// c </s> from model 1
BOOST_REQUIRE(++stream);
BOOST_CHECK_EQUAL(1, stream->instance);
BOOST_CHECK_EQUAL(3 /* </s> */, stream->word);
BOOST_CHECK_EQUAL(1, stream->model);
BOOST_CHECK_CLOSE(-0.09113217 * M_LN10, stream->ln_prob, 0.001);
BOOST_CHECK(!++stream);
}
}}} // namespaces
| 0 |
coqui_public_repos/inference-engine/src | coqui_public_repos/inference-engine/src/ctcdecode/decoder_utils.cpp | #include "decoder_utils.h"
#include <algorithm>
#include <cmath>
#include <limits>
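// Prunes the per-timestep class distribution. When pruning is active
// (cutoff_prob < 1.0 or cutoff_top_n < class_dim), classes are sorted in
// descending probability order; when cutoff_prob < 1.0, classes are kept
// until the cumulative probability reaches cutoff_prob or cutoff_top_n
// classes have been taken. (index, log-prob) pairs for the survivors are
// returned.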
std::vector<std::pair<size_t, float>> get_pruned_log_probs(
const double *prob_step,
size_t class_dim,
double cutoff_prob,
size_t cutoff_top_n) {
std::vector<std::pair<int, double>> prob_idx;
for (size_t i = 0; i < class_dim; ++i) {
prob_idx.push_back(std::pair<int, double>(i, prob_step[i]));
}
  // pruning of vocabulary
size_t cutoff_len = class_dim;
if (cutoff_prob < 1.0 || cutoff_top_n < cutoff_len) {
std::sort(
prob_idx.begin(), prob_idx.end(), pair_comp_second_rev<int, double>);
if (cutoff_prob < 1.0) {
double cum_prob = 0.0;
cutoff_len = 0;
for (size_t i = 0; i < prob_idx.size(); ++i) {
cum_prob += prob_idx[i].second;
cutoff_len += 1;
if (cum_prob >= cutoff_prob || cutoff_len >= cutoff_top_n) break;
}
}
prob_idx = std::vector<std::pair<int, double>>(
prob_idx.begin(), prob_idx.begin() + cutoff_len);
}
std::vector<std::pair<size_t, float>> log_prob_idx;
for (size_t i = 0; i < cutoff_len; ++i) {
log_prob_idx.push_back(std::pair<int, float>(
prob_idx[i].first, log(prob_idx[i].second + NUM_FLT_MIN)));
}
return log_prob_idx;
}
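// Counts UTF-8 codepoints by counting bytes that are not continuation bytes
// (continuation bytes match the bit pattern 10xxxxxx, i.e. (c & 0xc0) == 0x80).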
size_t get_utf8_str_len(const std::string &str) {
size_t str_len = 0;
for (char c : str) {
str_len += ((c & 0xc0) != 0x80);
}
return str_len;
}
std::vector<std::string> split_into_codepoints(const std::string &str) {
std::vector<std::string> result;
std::string out_str;
for (char c : str) {
if (byte_is_codepoint_boundary(c)) {
if (!out_str.empty()) {
result.push_back(out_str);
out_str.clear();
}
}
out_str.append(1, c);
}
result.push_back(out_str);
return result;
}
std::vector<std::string> split_into_bytes(const std::string &str) {
std::vector<std::string> result;
for (char c : str) {
std::string ch(1, c);
result.push_back(ch);
}
return result;
}
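// Splits a string on every occurrence of a (possibly multi-character)
// delimiter, dropping empty pieces.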
std::vector<std::string> split_str(const std::string &s,
const std::string &delim) {
std::vector<std::string> result;
std::size_t start = 0, delim_len = delim.size();
while (true) {
std::size_t end = s.find(delim, start);
if (end == std::string::npos) {
if (start < s.size()) {
result.push_back(s.substr(start));
}
break;
}
if (end > start) {
result.push_back(s.substr(start, end - start));
}
start = end + delim_len;
}
return result;
}
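// Orders prefixes by descending score, breaking ties by ascending character
// id so that the ordering is strict and deterministic.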
bool prefix_compare(const PathTrie *x, const PathTrie *y) {
if (x->score == y->score) {
if (x->character == y->character) {
return false;
} else {
return (x->character < y->character);
}
} else {
return x->score > y->score;
}
}
bool prefix_compare_external(const PathTrie *x, const PathTrie *y, const std::unordered_map<const PathTrie*, float>& scores) {
if (scores.at(x) == scores.at(y)) {
if (x->character == y->character) {
return false;
} else {
return (x->character < y->character);
}
} else {
return scores.at(x) > scores.at(y);
}
}
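// Appends a word (a sequence of symbol ids) to the dictionary FST as a
// linear chain of arcs ending in a final state; the start state is created
// on first use.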
void add_word_to_fst(const std::vector<unsigned int> &word,
fst::StdVectorFst *dictionary) {
if (dictionary->NumStates() == 0) {
fst::StdVectorFst::StateId start = dictionary->AddState();
assert(start == 0);
dictionary->SetStart(start);
}
fst::StdVectorFst::StateId src = dictionary->Start();
fst::StdVectorFst::StateId dst;
for (auto c : word) {
dst = dictionary->AddState();
dictionary->AddArc(src, fst::StdArc(c, c, 0, dst));
src = dst;
}
dictionary->SetFinal(dst, fst::StdArc::Weight::One());
}
bool add_word_to_dictionary(
const std::string &word,
const std::unordered_map<std::string, int> &char_map,
bool utf8,
int SPACE_ID,
fst::StdVectorFst *dictionary) {
auto characters = utf8 ? split_into_bytes(word) : split_into_codepoints(word);
std::vector<unsigned int> int_word;
for (auto &c : characters) {
auto int_c = char_map.find(c);
if (int_c != char_map.end()) {
int_word.push_back(int_c->second);
} else {
return false; // return without adding
}
}
if (!utf8) {
int_word.push_back(SPACE_ID);
}
add_word_to_fst(int_word, dictionary);
return true; // return with successful adding
}
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/script/epsnormalize.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#ifndef FST_SCRIPT_EPSNORMALIZE_H_
#define FST_SCRIPT_EPSNORMALIZE_H_
#include <tuple>
#include <fst/epsnormalize.h>
#include <fst/script/fst-class.h>
namespace fst {
namespace script {
using EpsNormalizeArgs = std::tuple<const FstClass &, MutableFstClass *,
EpsNormalizeType>;
template <class Arc>
void EpsNormalize(EpsNormalizeArgs *args) {
const Fst<Arc> &ifst = *(std::get<0>(*args).GetFst<Arc>());
MutableFst<Arc> *ofst = std::get<1>(*args)->GetMutableFst<Arc>();
EpsNormalize(ifst, ofst, std::get<2>(*args));
}
void EpsNormalize(const FstClass &ifst, MutableFstClass *ofst,
EpsNormalizeType norm_type = EPS_NORM_INPUT);
} // namespace script
} // namespace fst
#endif // FST_SCRIPT_EPSNORMALIZE_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/equivalent.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/equivalent.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
bool Equivalent(const FstClass &fst1, const FstClass &fst2, float delta) {
if (!internal::ArcTypesMatch(fst1, fst2, "Equivalent")) return false;
EquivalentInnerArgs iargs(fst1, fst2, delta);
EquivalentArgs args(iargs);
Apply<Operation<EquivalentArgs>>("Equivalent", fst1.ArcType(), &args);
return args.retval;
}
REGISTER_FST_OPERATION(Equivalent, StdArc, EquivalentArgs);
REGISTER_FST_OPERATION(Equivalent, LogArc, EquivalentArgs);
REGISTER_FST_OPERATION(Equivalent, Log64Arc, EquivalentArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party | coqui_public_repos/inference-engine/third_party/onnxruntime/LICENSE | MIT License
Copyright (c) Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/label-reachable.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to determine if a non-epsilon label can be read as the first
// non-epsilon symbol along some path from a given state.
#ifndef FST_LABEL_REACHABLE_H_
#define FST_LABEL_REACHABLE_H_
#include <unordered_map>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/accumulator.h>
#include <fst/arcsort.h>
#include <fst/interval-set.h>
#include <fst/state-reachable.h>
#include <fst/util.h>
#include <fst/vector-fst.h>
namespace fst {
// Stores shareable data for label reachable class copies.
template <typename Label>
class LabelReachableData {
public:
using LabelIntervalSet = IntervalSet<Label>;
using Interval = typename LabelIntervalSet::Interval;
explicit LabelReachableData(bool reach_input, bool keep_relabel_data = true)
: reach_input_(reach_input),
keep_relabel_data_(keep_relabel_data),
have_relabel_data_(true),
final_label_(kNoLabel) {}
~LabelReachableData() {}
bool ReachInput() const { return reach_input_; }
std::vector<LabelIntervalSet> *MutableIntervalSets() {
return &interval_sets_;
}
const LabelIntervalSet &GetIntervalSet(int s) const {
return interval_sets_[s];
}
int NumIntervalSets() const { return interval_sets_.size(); }
std::unordered_map<Label, Label> *Label2Index() {
if (!have_relabel_data_) {
FSTERROR() << "LabelReachableData: No relabeling data";
}
return &label2index_;
}
void SetFinalLabel(Label final_label) { final_label_ = final_label; }
Label FinalLabel() const { return final_label_; }
static LabelReachableData<Label> *Read(std::istream &istrm,
const FstReadOptions &opts) {
auto *data = new LabelReachableData<Label>();
ReadType(istrm, &data->reach_input_);
ReadType(istrm, &data->keep_relabel_data_);
data->have_relabel_data_ = data->keep_relabel_data_;
if (data->keep_relabel_data_) ReadType(istrm, &data->label2index_);
ReadType(istrm, &data->final_label_);
ReadType(istrm, &data->interval_sets_);
return data;
}
bool Write(std::ostream &ostrm, const FstWriteOptions &opts) const {
WriteType(ostrm, reach_input_);
WriteType(ostrm, keep_relabel_data_);
if (keep_relabel_data_) WriteType(ostrm, label2index_);
WriteType(ostrm, FinalLabel());
WriteType(ostrm, interval_sets_);
return true;
}
private:
LabelReachableData() {}
bool reach_input_; // Input labels considered?
bool keep_relabel_data_; // Save label2index_ to file?
bool have_relabel_data_; // Using label2index_?
Label final_label_; // Final label.
std::unordered_map<Label, Label> label2index_; // Finds index for a label.
std::vector<LabelIntervalSet> interval_sets_; // Interval sets per state.
};
// Tests reachability of labels from a given state. If reach_input is true, then
// input labels are considered, o.w. output labels are considered. To test for
// reachability from a state s, first do SetState(s), then a label l can be
// reached from state s of FST f iff Reach(r) is true where r = Relabel(l). The
// relabeling is required to ensure a compact representation of the reachable
// labels.
// The whole FST can be relabeled instead with Relabel(&f, reach_input) so that
// the test Reach(r) applies directly to the labels of the transformed FST f.
// The relabeled FST will also be sorted appropriately for composition.
//
// Reachability of a final state from state s (via an epsilon path) can be
// tested with ReachFinal().
//
// Reachability can also be tested on the set of labels specified by an arc
// iterator, useful for FST composition. In particular, Reach(aiter, ...) is
// true if labels on the input (output) side of the transitions of the arc
// iterator, when iter_input is true (false), can be reached from the state s.
// The iterator labels must have already been relabeled.
//
// With the arc iterator test of reachability, the begin position, end position
// and accumulated arc weight of the matches can be returned. The optional
// template argument controls how reachable arc weights are accumulated. The
// default uses semiring Plus(). Alternative ones can be used to distribute the
// weights in composition in various ways.
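//
// A minimal usage sketch (illustrative; "fst", "s", and "label" are assumed
// to be a StdVectorFst, a state id, and an original label of interest):
//
//   LabelReachable<StdArc> reachable(fst, /*reach_input=*/true);
//   reachable.SetState(s);
//   if (reachable.Reach(reachable.Relabel(label))) { /* reachable from s */ }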
template <class Arc, class Accumulator = DefaultAccumulator<Arc>,
class D = LabelReachableData<typename Arc::Label>>
class LabelReachable {
public:
using Label = typename Arc::Label;
using StateId = typename Arc::StateId;
using Weight = typename Arc::Weight;
using Data = D;
using LabelIntervalSet = typename Data::LabelIntervalSet;
using Interval = typename LabelIntervalSet::Interval;
LabelReachable(const Fst<Arc> &fst, bool reach_input,
Accumulator *accumulator = nullptr,
bool keep_relabel_data = true)
: fst_(new VectorFst<Arc>(fst)),
s_(kNoStateId),
data_(std::make_shared<Data>(reach_input, keep_relabel_data)),
accumulator_(accumulator ? accumulator : new Accumulator()),
ncalls_(0),
nintervals_(0),
reach_fst_input_(false),
error_(false) {
const auto ins = fst_->NumStates();
TransformFst();
FindIntervals(ins);
fst_.reset();
}
explicit LabelReachable(std::shared_ptr<Data> data,
Accumulator *accumulator = nullptr)
: s_(kNoStateId),
data_(std::move(data)),
accumulator_(accumulator ? accumulator : new Accumulator()),
ncalls_(0),
nintervals_(0),
reach_fst_input_(false),
error_(false) {}
LabelReachable(const LabelReachable<Arc, Accumulator, Data> &reachable,
bool safe = false)
: s_(kNoStateId),
data_(reachable.data_),
accumulator_(new Accumulator(*reachable.accumulator_, safe)),
ncalls_(0),
nintervals_(0),
reach_fst_input_(reachable.reach_fst_input_),
error_(reachable.error_) {}
~LabelReachable() {
if (ncalls_ > 0) {
VLOG(2) << "# of calls: " << ncalls_;
VLOG(2) << "# of intervals/call: " << (nintervals_ / ncalls_);
}
}
  // Relabels w.r.t. labels that give compact label sets.
Label Relabel(Label label) {
if (label == 0 || error_) return label;
auto &label2index = *data_->Label2Index();
auto &relabel = label2index[label];
if (!relabel) relabel = label2index.size() + 1; // Adds new label.
return relabel;
}
  // Relabels the FST w.r.t. labels that give compact label sets.
void Relabel(MutableFst<Arc> *fst, bool relabel_input) {
for (StateIterator<MutableFst<Arc>> siter(*fst); !siter.Done();
siter.Next()) {
for (MutableArcIterator<MutableFst<Arc>> aiter(fst, siter.Value());
!aiter.Done(); aiter.Next()) {
auto arc = aiter.Value();
if (relabel_input) {
arc.ilabel = Relabel(arc.ilabel);
} else {
arc.olabel = Relabel(arc.olabel);
}
aiter.SetValue(arc);
}
}
if (relabel_input) {
ArcSort(fst, ILabelCompare<Arc>());
fst->SetInputSymbols(nullptr);
} else {
ArcSort(fst, OLabelCompare<Arc>());
fst->SetOutputSymbols(nullptr);
}
}
// Returns relabeling pairs (cf. relabel.h::Relabel()). If avoid_collisions is
// true, extra pairs are added to ensure no collisions when relabeling
// automata that have labels unseen here.
void RelabelPairs(std::vector<std::pair<Label, Label>> *pairs,
bool avoid_collisions = false) {
pairs->clear();
const auto &label2index = *data_->Label2Index();
// Maps labels to their new values in [1, label2index().size()].
for (auto it = label2index.begin(); it != label2index.end(); ++it) {
if (it->second != data_->FinalLabel()) {
pairs->push_back(std::make_pair(it->first, it->second));
}
}
if (avoid_collisions) {
// Ensures any label in [1, label2index().size()] is mapped either
      // by the above step or to label2index().size() + 1 (to avoid collisions).
for (size_t i = 1; i <= label2index.size(); ++i) {
const auto it = label2index.find(i);
if (it == label2index.end() || it->second == data_->FinalLabel()) {
pairs->push_back(std::make_pair(i, label2index.size() + 1));
}
}
}
}
// Set current state. Optionally set state associated
// with arc iterator to be passed to Reach.
void SetState(StateId s, StateId aiter_s = kNoStateId) {
s_ = s;
if (aiter_s != kNoStateId) {
accumulator_->SetState(aiter_s);
if (accumulator_->Error()) error_ = true;
}
}
// Can reach this label from current state?
// Original labels must be transformed by the Relabel methods above.
bool Reach(Label label) const {
if (label == 0 || error_) return false;
return data_->GetIntervalSet(s_).Member(label);
}
// Can reach final state (via epsilon transitions) from this state?
bool ReachFinal() const {
if (error_) return false;
return data_->GetIntervalSet(s_).Member(data_->FinalLabel());
}
// Initialize with secondary FST to be used with Reach(Iterator,...).
// If reach_input = true, then arc input labels are considered in
// Reach(aiter, ...), o.w. output labels are considered. If copy is true, then
// the FST is a copy of the FST used in the previous call to this method
// (useful to avoid unnecessary updates).
template <class FST>
void ReachInit(const FST &fst, bool reach_input, bool copy = false) {
reach_fst_input_ = reach_input;
if (!fst.Properties(reach_fst_input_ ? kILabelSorted : kOLabelSorted,
true)) {
FSTERROR() << "LabelReachable::ReachInit: Fst is not sorted";
error_ = true;
}
accumulator_->Init(fst, copy);
if (accumulator_->Error()) error_ = true;
}
// Can reach any arc iterator label between iterator positions
// aiter_begin and aiter_end?
// Arc iterator labels must be transformed by the Relabel methods
// above. If compute_weight is true, user may call ReachWeight().
template <class Iterator>
bool Reach(Iterator *aiter, ssize_t aiter_begin, ssize_t aiter_end,
bool compute_weight) {
if (error_) return false;
const auto &interval_set = data_->GetIntervalSet(s_);
++ncalls_;
nintervals_ += interval_set.Size();
reach_begin_ = -1;
reach_end_ = -1;
reach_weight_ = Weight::Zero();
const auto flags = aiter->Flags(); // Save flags to restore them on exit.
aiter->SetFlags(kArcNoCache, kArcNoCache); // Makes caching optional.
aiter->Seek(aiter_begin);
if (2 * (aiter_end - aiter_begin) < interval_set.Size()) {
// Checks each arc against intervals, setting arc iterator flags to only
// compute the ilabel or olabel values, since they are the only values
// required for most of the arcs processed.
aiter->SetFlags(reach_fst_input_ ? kArcILabelValue : kArcOLabelValue,
kArcValueFlags);
Label reach_label = kNoLabel;
for (auto aiter_pos = aiter_begin; aiter_pos < aiter_end;
aiter->Next(), ++aiter_pos) {
const auto &arc = aiter->Value();
const auto label = reach_fst_input_ ? arc.ilabel : arc.olabel;
if (label == reach_label || Reach(label)) {
reach_label = label;
if (reach_begin_ < 0) reach_begin_ = aiter_pos;
reach_end_ = aiter_pos + 1;
if (compute_weight) {
if (!(aiter->Flags() & kArcWeightValue)) {
// If arc.weight wasn't computed by the call to aiter->Value()
// above, we need to call aiter->Value() again after having set
// the arc iterator flags to compute the arc weight value.
aiter->SetFlags(kArcWeightValue, kArcValueFlags);
const auto &arcb = aiter->Value();
// Call the accumulator.
reach_weight_ = accumulator_->Sum(reach_weight_, arcb.weight);
// Only ilabel or olabel required to process the following arcs.
aiter->SetFlags(
reach_fst_input_ ? kArcILabelValue : kArcOLabelValue,
kArcValueFlags);
} else {
// Calls the accumulator.
reach_weight_ = accumulator_->Sum(reach_weight_, arc.weight);
}
}
}
}
} else {
// Checks each interval against arcs.
auto begin_low = aiter_begin;
auto end_low = aiter_begin;
for (const auto &interval : interval_set) {
begin_low = LowerBound(aiter, end_low, aiter_end, interval.begin);
end_low = LowerBound(aiter, begin_low, aiter_end, interval.end);
if (end_low - begin_low > 0) {
if (reach_begin_ < 0) reach_begin_ = begin_low;
reach_end_ = end_low;
if (compute_weight) {
aiter->SetFlags(kArcWeightValue, kArcValueFlags);
reach_weight_ =
accumulator_->Sum(reach_weight_, aiter, begin_low, end_low);
}
}
}
}
aiter->SetFlags(flags, kArcFlags); // Restores original flag values.
return reach_begin_ >= 0;
}
// Returns iterator position of first matching arc.
ssize_t ReachBegin() const { return reach_begin_; }
// Returns iterator position one past last matching arc.
ssize_t ReachEnd() const { return reach_end_; }
// Return the sum of the weights for matching arcs. Valid only if
// compute_weight was true in Reach() call.
Weight ReachWeight() const { return reach_weight_; }
// Access to the relabeling map. Excludes epsilon (0) label but
// includes kNoLabel that is used internally for super-final
  // transitions.
const std::unordered_map<Label, Label> &Label2Index() const {
return *data_->Label2Index();
}
const Data *GetData() const { return data_.get(); }
std::shared_ptr<Data> GetSharedData() const { return data_; }
bool Error() const { return error_ || accumulator_->Error(); }
private:
// Redirects labeled arcs (input or output labels determined by ReachInput())
// to new label-specific final states. Each original final state is
// redirected via a transition labeled with kNoLabel to a new
// kNoLabel-specific final state. Creates super-initial state for all states
// with zero in-degree.
void TransformFst() {
auto ins = fst_->NumStates();
auto ons = ins;
std::vector<ssize_t> indeg(ins, 0);
// Redirects labeled arcs to new final states.
for (StateId s = 0; s < ins; ++s) {
for (MutableArcIterator<VectorFst<Arc>> aiter(fst_.get(), s);
!aiter.Done(); aiter.Next()) {
auto arc = aiter.Value();
const auto label = data_->ReachInput() ? arc.ilabel : arc.olabel;
if (label) {
auto insert_result = label2state_.insert(std::make_pair(label, ons));
if (insert_result.second) {
indeg.push_back(0);
++ons;
}
arc.nextstate = label2state_[label];
aiter.SetValue(arc);
}
++indeg[arc.nextstate]; // Finds in-degrees for next step.
}
// Redirects final weights to new final state.
const auto final_weight = fst_->Final(s);
if (final_weight != Weight::Zero()) {
auto insert_result = label2state_.insert(std::make_pair(kNoLabel, ons));
if (insert_result.second) {
indeg.push_back(0);
++ons;
}
Arc arc(kNoLabel, kNoLabel, final_weight, label2state_[kNoLabel]);
fst_->AddArc(s, arc);
++indeg[arc.nextstate]; // Finds in-degrees for next step.
fst_->SetFinal(s, Weight::Zero());
}
}
// Adds new final states to the FST.
while (fst_->NumStates() < ons) {
StateId s = fst_->AddState();
fst_->SetFinal(s, Weight::One());
}
// Creates a super-initial state for all states with zero in-degree.
const auto start = fst_->AddState();
fst_->SetStart(start);
for (StateId s = 0; s < start; ++s) {
if (indeg[s] == 0) {
Arc arc(0, 0, Weight::One(), s);
fst_->AddArc(start, arc);
}
}
}
void FindIntervals(StateId ins) {
StateReachable<Arc, Label, LabelIntervalSet> state_reachable(*fst_);
if (state_reachable.Error()) {
error_ = true;
return;
}
auto &state2index = state_reachable.State2Index();
auto &interval_sets = *data_->MutableIntervalSets();
interval_sets = state_reachable.IntervalSets();
interval_sets.resize(ins);
auto &label2index = *data_->Label2Index();
for (const auto &kv : label2state_) {
Label i = state2index[kv.second];
label2index[kv.first] = i;
if (kv.first == kNoLabel) data_->SetFinalLabel(i);
}
label2state_.clear();
double nintervals = 0;
ssize_t non_intervals = 0;
for (StateId s = 0; s < ins; ++s) {
nintervals += interval_sets[s].Size();
if (interval_sets[s].Size() > 1) {
++non_intervals;
VLOG(3) << "state: " << s
<< " # of intervals: " << interval_sets[s].Size();
}
}
VLOG(2) << "# of states: " << ins;
VLOG(2) << "# of intervals: " << nintervals;
VLOG(2) << "# of intervals/state: " << nintervals / ins;
VLOG(2) << "# of non-interval states: " << non_intervals;
}
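  // Returns the position of the first arc in [aiter_begin, aiter_end) whose
  // relevant label is >= match_label, i.e. a standard lower bound computed
  // by binary search over the label-sorted arcs.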
template <class Iterator>
ssize_t LowerBound(Iterator *aiter, ssize_t aiter_begin, ssize_t aiter_end,
Label match_label) const {
// Only needs to compute the ilabel or olabel of arcs when performing the
// binary search.
aiter->SetFlags(reach_fst_input_ ? kArcILabelValue : kArcOLabelValue,
kArcValueFlags);
ssize_t low = aiter_begin;
ssize_t high = aiter_end;
while (low < high) {
const ssize_t mid = low + (high - low) / 2;
aiter->Seek(mid);
auto label =
reach_fst_input_ ? aiter->Value().ilabel : aiter->Value().olabel;
if (label < match_label) {
low = mid + 1;
} else {
high = mid;
}
}
aiter->Seek(low);
aiter->SetFlags(kArcValueFlags, kArcValueFlags);
return low;
}
std::unique_ptr<VectorFst<Arc>> fst_;
// Current state
StateId s_;
// Finds final state for a label
std::unordered_map<Label, StateId> label2state_;
// Iterator position of first match.
ssize_t reach_begin_;
// Iterator position after last match.
ssize_t reach_end_;
// Gives weight sum of arc iterator arcs with reachable labels.
Weight reach_weight_;
// Shareable data between copies.
std::shared_ptr<Data> data_;
// Sums arc weights.
std::unique_ptr<Accumulator> accumulator_;
double ncalls_;
double nintervals_;
bool reach_fst_input_;
bool error_;
};
} // namespace fst
#endif // FST_LABEL_REACHABLE_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/lib/properties.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions for updating property bits for various FST operations and
// string names of the properties.
#include <fst/properties.h>
#include <stddef.h>
#include <vector>
namespace fst {
// These functions determine the properties associated with the FST result of
// various finite-state operations. The property arguments correspond to the
// operation's FST arguments. The properties returned assume the operation
// modifies its first argument. Bitwise-and this result with kCopyProperties for
// the case when a new (possibly delayed) FST is instead constructed.
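// For example (sketch, not library code): a delayed closure FST, which
// constructs a new FST rather than modifying its argument, would compute
// ClosureProperties(props, star, /*delayed=*/true) & kCopyProperties.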
// Properties for a concatenatively-closed FST.
uint64_t ClosureProperties(uint64_t inprops, bool star, bool delayed) {
auto outprops = (kError | kAcceptor | kUnweighted | kAccessible) & inprops;
if (inprops & kUnweighted) outprops |= kUnweightedCycles;
if (!delayed) {
outprops |=
(kExpanded | kMutable | kCoAccessible | kNotTopSorted | kNotString) &
inprops;
}
if (!delayed || inprops & kAccessible) {
outprops |= (kNotAcceptor | kNonIDeterministic | kNonODeterministic |
kNotILabelSorted | kNotOLabelSorted | kWeighted |
kWeightedCycles | kNotAccessible | kNotCoAccessible) & inprops;
if ((inprops & kWeighted) && (inprops & kAccessible) &&
(inprops & kCoAccessible)) {
outprops |= kWeightedCycles;
}
}
return outprops;
}
// Properties for a complemented FST.
uint64_t ComplementProperties(uint64_t inprops) {
auto outprops = kAcceptor | kUnweighted | kUnweightedCycles | kNoEpsilons |
kNoIEpsilons | kNoOEpsilons | kIDeterministic |
kODeterministic | kAccessible;
outprops |=
(kError | kILabelSorted | kOLabelSorted | kInitialCyclic) & inprops;
if (inprops & kAccessible) {
outprops |= kNotILabelSorted | kNotOLabelSorted | kCyclic;
}
return outprops;
}
// Properties for a composed FST.
uint64_t ComposeProperties(uint64_t inprops1, uint64_t inprops2) {
auto outprops = kError & (inprops1 | inprops2);
if (inprops1 & kAcceptor && inprops2 & kAcceptor) {
outprops |= kAcceptor | kAccessible;
outprops |= (kNoEpsilons | kNoIEpsilons | kNoOEpsilons | kAcyclic |
kInitialAcyclic) &
inprops1 & inprops2;
if (kNoIEpsilons & inprops1 & inprops2) {
outprops |= (kIDeterministic | kODeterministic) & inprops1 & inprops2;
}
} else {
outprops |= kAccessible;
outprops |= (kAcceptor | kNoIEpsilons | kAcyclic | kInitialAcyclic) &
inprops1 & inprops2;
if (kNoIEpsilons & inprops1 & inprops2) {
outprops |= kIDeterministic & inprops1 & inprops2;
}
}
return outprops;
}
// Properties for a concatenated FST.
uint64_t ConcatProperties(uint64_t inprops1, uint64_t inprops2, bool delayed) {
auto outprops = (kAcceptor | kUnweighted | kUnweightedCycles | kAcyclic) &
inprops1 & inprops2;
outprops |= kError & (inprops1 | inprops2);
const bool empty1 = delayed; // Can the first FST be the empty machine?
const bool empty2 = delayed; // Can the second FST be the empty machine?
if (!delayed) {
outprops |= (kExpanded | kMutable | kNotTopSorted | kNotString) & inprops1;
outprops |= (kNotTopSorted | kNotString) & inprops2;
}
if (!empty1) outprops |= (kInitialAcyclic | kInitialCyclic) & inprops1;
if (!delayed || inprops1 & kAccessible) {
outprops |= (kNotAcceptor | kNonIDeterministic | kNonODeterministic |
kEpsilons | kIEpsilons | kOEpsilons | kNotILabelSorted |
kNotOLabelSorted | kWeighted | kWeightedCycles | kCyclic |
kNotAccessible | kNotCoAccessible) &
inprops1;
}
if ((inprops1 & (kAccessible | kCoAccessible)) ==
(kAccessible | kCoAccessible) &&
!empty1) {
outprops |= kAccessible & inprops2;
if (!empty2) outprops |= kCoAccessible & inprops2;
if (!delayed || inprops2 & kAccessible) {
outprops |= (kNotAcceptor | kNonIDeterministic | kNonODeterministic |
kEpsilons | kIEpsilons | kOEpsilons | kNotILabelSorted |
kNotOLabelSorted | kWeighted | kWeightedCycles | kCyclic |
kNotAccessible | kNotCoAccessible) &
inprops2;
}
}
return outprops;
}
// Properties for a determinized FST.
uint64_t DeterminizeProperties(uint64_t inprops, bool has_subsequential_label,
bool distinct_psubsequential_labels) {
auto outprops = kAccessible;
if ((kAcceptor & inprops) ||
((kNoIEpsilons & inprops) && distinct_psubsequential_labels) ||
(has_subsequential_label && distinct_psubsequential_labels)) {
outprops |= kIDeterministic;
}
outprops |= (kError | kAcceptor | kAcyclic | kInitialAcyclic | kCoAccessible |
kString) &
inprops;
if ((inprops & kNoIEpsilons) && distinct_psubsequential_labels) {
outprops |= kNoEpsilons & inprops;
}
if (inprops & kAccessible) {
outprops |= (kIEpsilons | kOEpsilons | kCyclic) & inprops;
}
if (inprops & kAcceptor) outprops |= (kNoIEpsilons | kNoOEpsilons) & inprops;
if ((inprops & kNoIEpsilons) && has_subsequential_label) {
outprops |= kNoIEpsilons;
}
return outprops;
}
// Properties for factored weight FST.
uint64_t FactorWeightProperties(uint64_t inprops) {
auto outprops = (kExpanded | kMutable | kError | kAcceptor | kAcyclic |
kAccessible | kCoAccessible) &
inprops;
if (inprops & kAccessible) {
outprops |= (kNotAcceptor | kNonIDeterministic | kNonODeterministic |
kEpsilons | kIEpsilons | kOEpsilons | kCyclic |
kNotILabelSorted | kNotOLabelSorted) &
inprops;
}
return outprops;
}
// Properties for an inverted FST.
uint64_t InvertProperties(uint64_t inprops) {
auto outprops = (kExpanded | kMutable | kError | kAcceptor | kNotAcceptor |
kEpsilons | kNoEpsilons | kWeighted | kUnweighted |
kWeightedCycles | kUnweightedCycles | kCyclic | kAcyclic |
kInitialCyclic | kInitialAcyclic | kTopSorted |
kNotTopSorted | kAccessible | kNotAccessible |
kCoAccessible | kNotCoAccessible | kString | kNotString) &
inprops;
if (kIDeterministic & inprops) outprops |= kODeterministic;
if (kNonIDeterministic & inprops) outprops |= kNonODeterministic;
if (kODeterministic & inprops) outprops |= kIDeterministic;
if (kNonODeterministic & inprops) outprops |= kNonIDeterministic;
if (kIEpsilons & inprops) outprops |= kOEpsilons;
if (kNoIEpsilons & inprops) outprops |= kNoOEpsilons;
if (kOEpsilons & inprops) outprops |= kIEpsilons;
if (kNoOEpsilons & inprops) outprops |= kNoIEpsilons;
if (kILabelSorted & inprops) outprops |= kOLabelSorted;
if (kNotILabelSorted & inprops) outprops |= kNotOLabelSorted;
if (kOLabelSorted & inprops) outprops |= kILabelSorted;
if (kNotOLabelSorted & inprops) outprops |= kNotILabelSorted;
return outprops;
}
// Properties for a projected FST.
uint64_t ProjectProperties(uint64_t inprops, bool project_input) {
auto outprops = kAcceptor;
outprops |= (kExpanded | kMutable | kError | kWeighted | kUnweighted |
kWeightedCycles | kUnweightedCycles |
kCyclic | kAcyclic | kInitialCyclic | kInitialAcyclic |
kTopSorted | kNotTopSorted | kAccessible | kNotAccessible |
kCoAccessible | kNotCoAccessible | kString | kNotString) &
inprops;
if (project_input) {
outprops |= (kIDeterministic | kNonIDeterministic | kIEpsilons |
kNoIEpsilons | kILabelSorted | kNotILabelSorted) &
inprops;
if (kIDeterministic & inprops) outprops |= kODeterministic;
if (kNonIDeterministic & inprops) outprops |= kNonODeterministic;
if (kIEpsilons & inprops) outprops |= kOEpsilons | kEpsilons;
if (kNoIEpsilons & inprops) outprops |= kNoOEpsilons | kNoEpsilons;
if (kILabelSorted & inprops) outprops |= kOLabelSorted;
if (kNotILabelSorted & inprops) outprops |= kNotOLabelSorted;
} else {
outprops |= (kODeterministic | kNonODeterministic | kOEpsilons |
kNoOEpsilons | kOLabelSorted | kNotOLabelSorted) &
inprops;
if (kODeterministic & inprops) outprops |= kIDeterministic;
if (kNonODeterministic & inprops) outprops |= kNonIDeterministic;
if (kOEpsilons & inprops) outprops |= kIEpsilons | kEpsilons;
if (kNoOEpsilons & inprops) outprops |= kNoIEpsilons | kNoEpsilons;
if (kOLabelSorted & inprops) outprops |= kILabelSorted;
if (kNotOLabelSorted & inprops) outprops |= kNotILabelSorted;
}
return outprops;
}
// Properties for a randgen FST.
uint64_t RandGenProperties(uint64_t inprops, bool weighted) {
auto outprops = kAcyclic | kInitialAcyclic | kAccessible | kUnweightedCycles;
outprops |= inprops & kError;
if (weighted) {
outprops |= kTopSorted;
outprops |=
(kAcceptor | kNoEpsilons | kNoIEpsilons | kNoOEpsilons |
kIDeterministic | kODeterministic | kILabelSorted | kOLabelSorted) &
inprops;
} else {
outprops |= kUnweighted;
outprops |= (kAcceptor | kILabelSorted | kOLabelSorted) & inprops;
}
return outprops;
}
// Properties for a replace FST.
uint64_t ReplaceProperties(const std::vector<uint64_t>& inprops, std::ptrdiff_t root,
bool epsilon_on_call, bool epsilon_on_return,
bool out_epsilon_on_call, bool out_epsilon_on_return,
bool replace_transducer, bool no_empty_fsts,
bool all_ilabel_sorted, bool all_olabel_sorted,
bool all_negative_or_dense) {
if (inprops.empty()) return kNullProperties;
uint64_t outprops = 0;
for (auto inprop : inprops) outprops |= kError & inprop;
uint64_t access_props = no_empty_fsts ? kAccessible | kCoAccessible : 0;
for (auto inprop : inprops) {
access_props &= (inprop & (kAccessible | kCoAccessible));
}
if (access_props == (kAccessible | kCoAccessible)) {
outprops |= access_props;
if (inprops[root] & kInitialCyclic) outprops |= kInitialCyclic;
uint64_t props = 0;
bool string = true;
for (auto inprop : inprops) {
if (replace_transducer) props |= kNotAcceptor & inprop;
props |= (kNonIDeterministic | kNonODeterministic | kEpsilons |
kIEpsilons | kOEpsilons | kWeighted | kWeightedCycles |
kCyclic | kNotTopSorted | kNotString) & inprop;
if (!(inprop & kString)) string = false;
}
outprops |= props;
if (string) outprops |= kString;
}
bool acceptor = !replace_transducer;
bool ideterministic = !epsilon_on_call && epsilon_on_return;
bool no_iepsilons = !epsilon_on_call && !epsilon_on_return;
bool acyclic = true;
bool unweighted = true;
for (size_t i = 0; i < inprops.size(); ++i) {
if (!(inprops[i] & kAcceptor)) acceptor = false;
if (!(inprops[i] & kIDeterministic)) ideterministic = false;
if (!(inprops[i] & kNoIEpsilons)) no_iepsilons = false;
if (!(inprops[i] & kAcyclic)) acyclic = false;
if (!(inprops[i] & kUnweighted)) unweighted = false;
if (i != root && !(inprops[i] & kNoIEpsilons)) ideterministic = false;
}
if (acceptor) outprops |= kAcceptor;
if (ideterministic) outprops |= kIDeterministic;
if (no_iepsilons) outprops |= kNoIEpsilons;
if (acyclic) outprops |= kAcyclic;
if (unweighted) outprops |= kUnweighted;
if (inprops[root] & kInitialAcyclic) outprops |= kInitialAcyclic;
// We assume that all terminals are positive. The resulting ReplaceFst is
// known to be kILabelSorted when: (1) all sub-FSTs are kILabelSorted, (2) the
// input label of the return arc is epsilon, and (3) one of the 3 following
// conditions is satisfied:
//
  // 1. the input label of the call arc is not epsilon,
  // 2. all non-terminals are negative, or
  // 3. all non-terminals are positive and form a dense range containing 1.
if (all_ilabel_sorted && epsilon_on_return &&
(!epsilon_on_call || all_negative_or_dense)) {
outprops |= kILabelSorted;
}
// Similarly, the resulting ReplaceFst is known to be kOLabelSorted when: (1)
// all sub-FSTs are kOLabelSorted, (2) the output label of the return arc is
// epsilon, and (3) one of the 3 following conditions is satisfied:
//
  // 1. the output label of the call arc is not epsilon,
  // 2. all non-terminals are negative, or
  // 3. all non-terminals are positive and form a dense range containing 1.
if (all_olabel_sorted && out_epsilon_on_return &&
(!out_epsilon_on_call || all_negative_or_dense)) {
outprops |= kOLabelSorted;
}
return outprops;
}
// Properties for a relabeled FST.
uint64_t RelabelProperties(uint64_t inprops) {
static constexpr auto outprops =
kExpanded | kMutable | kError | kWeighted | kUnweighted |
kWeightedCycles | kUnweightedCycles | kCyclic | kAcyclic |
kInitialCyclic | kInitialAcyclic | kTopSorted | kNotTopSorted |
kAccessible | kNotAccessible | kCoAccessible | kNotCoAccessible |
kString | kNotString;
return outprops & inprops;
}
// Properties for a reversed FST (the superinitial state limits this set).
uint64_t ReverseProperties(uint64_t inprops, bool has_superinitial) {
auto outprops = (kExpanded | kMutable | kError | kAcceptor | kNotAcceptor |
kEpsilons | kIEpsilons | kOEpsilons | kUnweighted | kCyclic |
kAcyclic | kWeightedCycles | kUnweightedCycles) &
inprops;
if (has_superinitial) outprops |= kWeighted & inprops;
return outprops;
}
// Properties for re-weighted FST.
uint64_t ReweightProperties(uint64_t inprops) {
auto outprops = inprops & kWeightInvariantProperties;
outprops = outprops & ~kCoAccessible;
return outprops;
}
// Properties for an epsilon-removed FST.
uint64_t RmEpsilonProperties(uint64_t inprops, bool delayed) {
auto outprops = kNoEpsilons;
outprops |= (kError | kAcceptor | kAcyclic | kInitialAcyclic) & inprops;
if (inprops & kAcceptor) outprops |= kNoIEpsilons | kNoOEpsilons;
if (!delayed) {
outprops |= kExpanded | kMutable;
outprops |= kTopSorted & inprops;
}
if (!delayed || inprops & kAccessible) outprops |= kNotAcceptor & inprops;
return outprops;
}
// Properties for shortest path. This function computes how the properties of
// the output of shortest path need to be updated, given that 'props' is already
// known.
uint64_t ShortestPathProperties(uint64_t props, bool tree) {
auto outprops =
props | kAcyclic | kInitialAcyclic | kAccessible | kUnweightedCycles;
if (!tree) outprops |= kCoAccessible;
return outprops;
}
// Properties for a synchronized FST.
uint64_t SynchronizeProperties(uint64_t inprops) {
auto outprops = (kError | kAcceptor | kAcyclic | kAccessible | kCoAccessible |
kUnweighted | kUnweightedCycles) &
inprops;
if (inprops & kAccessible) {
outprops |= (kCyclic | kNotCoAccessible | kWeighted | kWeightedCycles) &
inprops;
}
return outprops;
}
// Properties for a unioned FST.
uint64_t UnionProperties(uint64_t inprops1, uint64_t inprops2, bool delayed) {
auto outprops =
(kAcceptor | kUnweighted | kUnweightedCycles | kAcyclic | kAccessible) &
inprops1 & inprops2;
outprops |= kError & (inprops1 | inprops2);
outprops |= kInitialAcyclic;
bool empty1 = delayed; // Can the first FST be the empty machine?
bool empty2 = delayed; // Can the second FST be the empty machine?
if (!delayed) {
outprops |= (kExpanded | kMutable | kNotTopSorted) & inprops1;
outprops |= kNotTopSorted & inprops2;
}
if (!empty1 && !empty2) {
outprops |= kEpsilons | kIEpsilons | kOEpsilons;
outprops |= kCoAccessible & inprops1 & inprops2;
}
// Note kNotCoAccessible does not hold because of kInitialAcyclic option.
if (!delayed || inprops1 & kAccessible) {
outprops |=
(kNotAcceptor | kNonIDeterministic | kNonODeterministic | kEpsilons |
kIEpsilons | kOEpsilons | kNotILabelSorted | kNotOLabelSorted |
kWeighted | kWeightedCycles | kCyclic | kNotAccessible) &
inprops1;
}
if (!delayed || inprops2 & kAccessible) {
outprops |= (kNotAcceptor | kNonIDeterministic | kNonODeterministic |
kEpsilons | kIEpsilons | kOEpsilons | kNotILabelSorted |
kNotOLabelSorted | kWeighted | kWeightedCycles | kCyclic |
kNotAccessible | kNotCoAccessible) &
inprops2;
}
return outprops;
}
// Property string names (indexed by bit position).
const char* PropertyNames[] = {
// Binary.
"expanded", "mutable", "error", "", "", "", "", "", "", "", "", "", "", "",
"", "",
// Ternary.
"acceptor", "not acceptor", "input deterministic",
"non input deterministic", "output deterministic",
"non output deterministic", "input/output epsilons",
"no input/output epsilons", "input epsilons", "no input epsilons",
"output epsilons", "no output epsilons", "input label sorted",
"not input label sorted", "output label sorted", "not output label sorted",
"weighted", "unweighted", "cyclic", "acyclic", "cyclic at initial state",
"acyclic at initial state", "top sorted", "not top sorted", "accessible",
"not accessible", "coaccessible", "not coaccessible", "string",
"not string", "weighted cycles", "unweighted cycles"};
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/project.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes to project an FST on to its domain or range.
#ifndef FST_PROJECT_H_
#define FST_PROJECT_H_
#include <fst/arc-map.h>
#include <fst/mutable-fst.h>
namespace fst {
// This specifies whether to project on input or output.
enum ProjectType { PROJECT_INPUT = 1, PROJECT_OUTPUT = 2 };
// Mapper to implement projection per arc.
template <class A>
class ProjectMapper {
public:
using FromArc = A;
using ToArc = A;
explicit ProjectMapper(ProjectType project_type)
: project_type_(project_type) {}
ToArc operator()(const FromArc &arc) const {
const auto label = project_type_ == PROJECT_INPUT ? arc.ilabel : arc.olabel;
return ToArc(label, label, arc.weight, arc.nextstate);
}
constexpr MapFinalAction FinalAction() const {
return MAP_NO_SUPERFINAL;
}
MapSymbolsAction InputSymbolsAction() const {
return project_type_ == PROJECT_INPUT ? MAP_COPY_SYMBOLS
: MAP_CLEAR_SYMBOLS;
}
MapSymbolsAction OutputSymbolsAction() const {
return project_type_ == PROJECT_OUTPUT ? MAP_COPY_SYMBOLS
: MAP_CLEAR_SYMBOLS;
}
uint64_t Properties(uint64_t props) const {
return ProjectProperties(props, project_type_ == PROJECT_INPUT);
}
private:
const ProjectType project_type_;
};
// Projects an FST onto its domain or range by either copying each arc's input
// label to the output label or vice versa.
//
// Complexity:
//
// Time: O(V + E)
// Space: O(1)
//
// where V is the number of states and E is the number of arcs.
template <class Arc>
inline void Project(const Fst<Arc> &ifst, MutableFst<Arc> *ofst,
ProjectType project_type) {
ArcMap(ifst, ofst, ProjectMapper<Arc>(project_type));
switch (project_type) {
case PROJECT_INPUT:
ofst->SetOutputSymbols(ifst.InputSymbols());
return;
case PROJECT_OUTPUT:
ofst->SetInputSymbols(ifst.OutputSymbols());
return;
}
}
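// A minimal usage sketch (the names `ifst` and `ofst` are illustrative):
//
//   StdVectorFst ofst;
//   Project(ifst, &ofst, PROJECT_INPUT);  // ofst accepts ifst's input strings.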
// Destructive variant of the above.
template <class Arc>
inline void Project(MutableFst<Arc> *fst, ProjectType project_type) {
ArcMap(fst, ProjectMapper<Arc>(project_type));
switch (project_type) {
case PROJECT_INPUT:
fst->SetOutputSymbols(fst->InputSymbols());
return;
case PROJECT_OUTPUT:
fst->SetInputSymbols(fst->OutputSymbols());
return;
}
}
// Projects an FST onto its domain or range by either copying each arc's input
// label to the output label or vice versa. This version is a delayed FST.
//
// Complexity:
//
// Time: O(v + e)
// Space: O(1)
//
// where v is the number of states visited and e is the number of arcs visited.
// Constant time and space to visit an input state or arc is assumed and
// exclusive of caching.
template <class A>
class ProjectFst : public ArcMapFst<A, A, ProjectMapper<A>> {
public:
using FromArc = A;
using ToArc = A;
using Impl = internal::ArcMapFstImpl<A, A, ProjectMapper<A>>;
ProjectFst(const Fst<A> &fst, ProjectType project_type)
: ArcMapFst<A, A, ProjectMapper<A>>(fst, ProjectMapper<A>(project_type)) {
if (project_type == PROJECT_INPUT) {
GetMutableImpl()->SetOutputSymbols(fst.InputSymbols());
}
if (project_type == PROJECT_OUTPUT) {
GetMutableImpl()->SetInputSymbols(fst.OutputSymbols());
}
}
// See Fst<>::Copy() for doc.
ProjectFst(const ProjectFst<A> &fst, bool safe = false)
: ArcMapFst<A, A, ProjectMapper<A>>(fst, safe) {}
// Gets a copy of this ProjectFst. See Fst<>::Copy() for further doc.
ProjectFst<A> *Copy(bool safe = false) const override {
return new ProjectFst(*this, safe);
}
private:
using ImplToFst<Impl>::GetMutableImpl;
};
// Specialization for ProjectFst.
template <class A>
class StateIterator<ProjectFst<A>>
: public StateIterator<ArcMapFst<A, A, ProjectMapper<A>>> {
public:
explicit StateIterator(const ProjectFst<A> &fst)
: StateIterator<ArcMapFst<A, A, ProjectMapper<A>>>(fst) {}
};
// Specialization for ProjectFst.
template <class A>
class ArcIterator<ProjectFst<A>>
: public ArcIterator<ArcMapFst<A, A, ProjectMapper<A>>> {
public:
using StateId = typename A::StateId;
ArcIterator(const ProjectFst<A> &fst, StateId s)
: ArcIterator<ArcMapFst<A, A, ProjectMapper<A>>>(fst, s) {}
};
// Useful alias when using StdArc.
using StdProjectFst = ProjectFst<StdArc>;
} // namespace fst
#endif // FST_PROJECT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/compact/compact16_string-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/fst.h>
#include <fst/compact-fst.h>
namespace fst {
static FstRegisterer<CompactStringFst<StdArc, uint16>>
CompactStringFst_StdArc_uint16_registerer;
static FstRegisterer<CompactStringFst<LogArc, uint16>>
CompactStringFst_LogArc_uint16_registerer;
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/kenlm/lm | coqui_public_repos/STT/native_client/kenlm/lm/interpolate/tune_instances.hh | #ifndef LM_INTERPOLATE_TUNE_INSTANCE_H
#define LM_INTERPOLATE_TUNE_INSTANCE_H
#include "tune_matrix.hh"
#include "../word_index.hh"
#include "../../util/scoped.hh"
#include "../../util/stream/config.hh"
#include "../../util/string_piece.hh"
#include <boost/optional.hpp>
#include <vector>
namespace util { namespace stream {
class Chain;
class FileBuffer;
}} // namespaces
namespace lm { namespace interpolate {
typedef uint32_t InstanceIndex;
typedef uint32_t ModelIndex;
struct Extension {
// Which tuning instance does this belong to?
InstanceIndex instance;
WordIndex word;
ModelIndex model;
// ln p_{model} (word | context(instance))
float ln_prob;
bool operator<(const Extension &other) const;
};
class ExtensionsFirstIteration;
struct InstancesConfig {
// For batching the model reads. This is per order.
std::size_t model_read_chain_mem;
// This is being sorted, make it larger.
std::size_t extension_write_chain_mem;
std::size_t lazy_memory;
util::stream::SortConfig sort;
};
class Instances {
private:
typedef Eigen::Matrix<Accum, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> BackoffMatrix;
public:
Instances(int tune_file, const std::vector<StringPiece> &model_names, const InstancesConfig &config);
// For destruction of forward-declared classes.
~Instances();
// Full backoff from unigram for each model.
typedef BackoffMatrix::ConstRowXpr FullBackoffs;
FullBackoffs LNBackoffs(InstanceIndex instance) const {
return ln_backoffs_.row(instance);
}
InstanceIndex NumInstances() const { return ln_backoffs_.rows(); }
const Vector &CorrectGradientTerm() const { return neg_ln_correct_sum_; }
const Matrix &LNUnigrams() const { return ln_unigrams_; }
// Entry size to use to configure the chain (since in practice order is needed).
std::size_t ReadExtensionsEntrySize() const;
void ReadExtensions(util::stream::Chain &chain);
// Vocab id of the beginning of sentence. Used to ignore it for normalization.
WordIndex BOS() const { return bos_; }
private:
// Allow the derivatives test to get access.
friend class MockInstances;
Instances();
    // ln_backoffs_(instance, model) is the natural log of the backoff all the way to unigrams.
BackoffMatrix ln_backoffs_;
    // neg_ln_correct_sum_(model) = -\sum_{instances} ln p_{model}(correct(instance) | context(instance)).
// This appears as a term in the gradient.
Vector neg_ln_correct_sum_;
// ln_unigrams_(word, model) = ln p_{model}(word).
Matrix ln_unigrams_;
// This is the source of data for the first iteration.
util::scoped_ptr<ExtensionsFirstIteration> extensions_first_;
// Source of data for subsequent iterations. This contains already-sorted data.
util::scoped_ptr<util::stream::FileBuffer> extensions_subsequent_;
WordIndex bos_;
std::string temp_prefix_;
};
}} // namespaces
#endif // LM_INTERPOLATE_TUNE_INSTANCE_H
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/kenlm_linux-arm64-cpu-opt.yml | build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.kenlm.linux_arm64_cpu.url}
artifact_namespace: ${system.kenlm.linux_arm64_cpu.namespace}
system_setup:
>
apt-get -qq update && apt-get -qq -y install cmake wget pixz bzip2 multistrap
scripts:
setup: "taskcluster/kenlm_tc-setup.sh --linux-arm64"
build: "taskcluster/kenlm_tc-build.sh --linux-arm64"
package: "taskcluster/kenlm_tc-package.sh"
workerType: "${docker.dsBuild}"
metadata:
name: "KenLM Linux ARM64 CPU"
description: "Building KenLM for Linux/ARM64, CPU only, optimized version"
| 0 |
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidjson | coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidjson/internal/strtod.h | // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef CEREAL_RAPIDJSON_STRTOD_
#define CEREAL_RAPIDJSON_STRTOD_
#include "ieee754.h"
#include "biginteger.h"
#include "diyfp.h"
#include "pow10.h"
#include <climits>
#include <limits>
CEREAL_RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
inline double FastPath(double significand, int exp) {
if (exp < -308)
return 0.0;
else if (exp >= 0)
return significand * internal::Pow10(exp);
else
return significand / internal::Pow10(-exp);
}
inline double StrtodNormalPrecision(double d, int p) {
if (p < -308) {
// Prevent expSum < -308, making Pow10(p) = 0
d = FastPath(d, -308);
d = FastPath(d, p + 308);
}
else
d = FastPath(d, p);
return d;
}
template <typename T>
inline T Min3(T a, T b, T c) {
T m = a;
if (m > b) m = b;
if (m > c) m = c;
return m;
}
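// Compares the decimal value d * 10^dExp against the candidate double b.
// The decimal value, b, and half an ULP of b are scaled to a common integer
// basis; the result is negative, zero, or positive as |d * 10^dExp - b| is
// less than, equal to, or greater than half an ULP of b.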
inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
const Double db(b);
const uint64_t bInt = db.IntegerSignificand();
const int bExp = db.IntegerExponent();
const int hExp = bExp - 1;
int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
// Adjust for decimal exponent
if (dExp >= 0) {
dS_Exp2 += dExp;
dS_Exp5 += dExp;
}
else {
bS_Exp2 -= dExp;
bS_Exp5 -= dExp;
hS_Exp2 -= dExp;
hS_Exp5 -= dExp;
}
// Adjust for binary exponent
if (bExp >= 0)
bS_Exp2 += bExp;
else {
dS_Exp2 -= bExp;
hS_Exp2 -= bExp;
}
// Adjust for half ulp exponent
if (hExp >= 0)
hS_Exp2 += hExp;
else {
dS_Exp2 -= hExp;
bS_Exp2 -= hExp;
}
// Remove common power of two factor from all three scaled values
int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
dS_Exp2 -= common_Exp2;
bS_Exp2 -= common_Exp2;
hS_Exp2 -= common_Exp2;
BigInteger dS = d;
dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
BigInteger bS(bInt);
bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
BigInteger hS(1);
hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
BigInteger delta(0);
dS.Difference(bS, &delta);
return delta.Compare(hS);
}
inline bool StrtodFast(double d, int p, double* result) {
// Use fast path for string-to-double conversion if possible
// see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
if (p > 22 && p < 22 + 16) {
// Fast Path Cases In Disguise
d *= internal::Pow10(p - 22);
p = 22;
}
if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
*result = FastPath(d, p);
return true;
}
else
return false;
}
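// Worked example (illustrative): for the input "12345e3" a reader arrives
// here with d = 12345.0 and p = 3. Both 12345 and 10^3 are exactly
// representable and 12345 <= 2^53 - 1, so FastPath() returns the single
// correctly rounded multiplication 12345.0 * 1000.0 = 12345000.0.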
// Compute an approximation and see if it is within 1/2 ULP
inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
uint64_t significand = 0;
int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
for (; i < dLen; i++) {
if (significand > CEREAL_RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
(significand == CEREAL_RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
break;
significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
}
if (i < dLen && decimals[i] >= '5') // Rounding
significand++;
int remaining = dLen - i;
const int kUlpShift = 3;
const int kUlp = 1 << kUlpShift;
int64_t error = (remaining == 0) ? 0 : kUlp / 2;
DiyFp v(significand, 0);
v = v.Normalize();
error <<= -v.e;
dExp += remaining;
int actualExp;
DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
if (actualExp != dExp) {
static const DiyFp kPow10[] = {
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60), // 10^1
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57), // 10^2
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54), // 10^3
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50), // 10^4
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47), // 10^5
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44), // 10^6
DiyFp(CEREAL_RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40) // 10^7
};
int adjustment = dExp - actualExp;
CEREAL_RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8);
v = v * kPow10[adjustment - 1];
if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit
error += kUlp / 2;
}
v = v * cachedPower;
error += kUlp + (error == 0 ? 0 : 1);
const int oldExp = v.e;
v = v.Normalize();
error <<= oldExp - v.e;
const int effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
int precisionSize = 64 - effectiveSignificandSize;
if (precisionSize + kUlpShift >= 64) {
int scaleExp = (precisionSize + kUlpShift) - 63;
v.f >>= scaleExp;
v.e += scaleExp;
error = (error >> scaleExp) + 1 + kUlp;
precisionSize -= scaleExp;
}
DiyFp rounded(v.f >> precisionSize, v.e + precisionSize);
const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
rounded.f++;
if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
rounded.f >>= 1;
rounded.e++;
}
}
*result = rounded.ToDouble();
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
}
inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
CEREAL_RAPIDJSON_ASSERT(dLen >= 0);
const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
Double a(approx);
int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
if (cmp < 0)
return a.Value(); // within half ULP
else if (cmp == 0) {
// Round towards even
if (a.Significand() & 1)
return a.NextPositiveDouble();
else
return a.Value();
}
else // adjustment
return a.NextPositiveDouble();
}
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
CEREAL_RAPIDJSON_ASSERT(d >= 0.0);
CEREAL_RAPIDJSON_ASSERT(length >= 1);
double result = 0.0;
if (StrtodFast(d, p, &result))
return result;
CEREAL_RAPIDJSON_ASSERT(length <= INT_MAX);
int dLen = static_cast<int>(length);
CEREAL_RAPIDJSON_ASSERT(length >= decimalPosition);
CEREAL_RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX);
int dExpAdjust = static_cast<int>(length - decimalPosition);
CEREAL_RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust);
int dExp = exp - dExpAdjust;
// Make sure length+dExp does not overflow
CEREAL_RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen);
// Trim leading zeros
while (dLen > 0 && *decimals == '0') {
dLen--;
decimals++;
}
// Trim trailing zeros
while (dLen > 0 && decimals[dLen - 1] == '0') {
dLen--;
dExp++;
}
if (dLen == 0) { // Buffer only contains zeros.
return 0.0;
}
// Trim right-most digits
const int kMaxDecimalDigit = 767 + 1;
if (dLen > kMaxDecimalDigit) {
dExp += dLen - kMaxDecimalDigit;
dLen = kMaxDecimalDigit;
}
// If too small, underflow to zero.
// Any x <= 10^-324 is interpreted as zero.
if (dLen + dExp <= -324)
return 0.0;
// If too large, overflow to infinity.
// Any x >= 10^309 is interpreted as +infinity.
if (dLen + dExp > 309)
return std::numeric_limits<double>::infinity();
if (StrtodDiyFp(decimals, dLen, dExp, &result))
return result;
// Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
return StrtodBigInteger(result, decimals, dLen, dExp);
}
} // namespace internal
CEREAL_RAPIDJSON_NAMESPACE_END
#endif // CEREAL_RAPIDJSON_STRTOD_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/special/rho-fst.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/extensions/special/rho-fst.h>
#include <fst/fst.h>
DEFINE_int64(rho_fst_rho_label, 0,
"Label of transitions to be interpreted as rho ('rest') "
"transitions");
DEFINE_string(rho_fst_rewrite_mode, "auto",
"Rewrite both sides when matching? One of:"
" \"auto\" (rewrite iff acceptor), \"always\", \"never\"");
namespace fst {
const char rho_fst_type[] = "rho";
const char input_rho_fst_type[] = "input_rho";
const char output_rho_fst_type[] = "output_rho";
static FstRegisterer<StdRhoFst> RhoFst_StdArc_registerer;
static FstRegisterer<LogRhoFst> RhoFst_LogArc_registerer;
static FstRegisterer<Log64RhoFst> RhoFst_Log64Arc_registerer;
static FstRegisterer<StdInputRhoFst> InputRhoFst_StdArc_registerer;
static FstRegisterer<LogInputRhoFst> InputRhoFst_LogArc_registerer;
static FstRegisterer<Log64InputRhoFst> InputRhoFst_Log64Arc_registerer;
static FstRegisterer<StdOutputRhoFst> OutputRhoFst_StdArc_registerer;
static FstRegisterer<LogOutputRhoFst> OutputRhoFst_LogArc_registerer;
static FstRegisterer<Log64OutputRhoFst> OutputRhoFst_Log64Arc_registerer;
} // namespace fst
| 0 |
coqui_public_repos/STT/native_client/kenlm/lm | coqui_public_repos/STT/native_client/kenlm/lm/interpolate/pipeline.hh | #ifndef LM_INTERPOLATE_PIPELINE_H
#define LM_INTERPOLATE_PIPELINE_H
#include "../common/model_buffer.hh"
#include "../../util/fixed_array.hh"
#include "../../util/stream/config.hh"
#include <cstddef>
#include <string>
namespace lm { namespace interpolate {
struct Config {
std::vector<float> lambdas;
util::stream::SortConfig sort;
std::size_t BufferSize() const { return sort.buffer_size; }
};
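// Usage sketch (illustrative, under the assumption that `lambdas` holds one
// interpolation weight per input model): fill `lambdas`, configure `sort`,
// then call Pipeline(models, config, fd), where `fd` is the file descriptor
// the interpolated model is written to.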
void Pipeline(util::FixedArray<ModelBuffer> &models, const Config &config, int write_file);
}} // namespaces
#endif // LM_INTERPOLATE_PIPELINE_H
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/Makefile.am | if HAVE_COMPRESS
compress_include_headers = fst/extensions/compress/compress.h \
fst/extensions/compress/compress-script.h fst/extensions/compress/gzfile.h \
fst/extensions/compress/elias.h fst/extensions/compress/randmod.h
endif
if HAVE_FAR
far_include_headers = fst/extensions/far/compile-strings.h \
fst/extensions/far/create.h fst/extensions/far/equal.h \
fst/extensions/far/extract.h fst/extensions/far/far.h \
fst/extensions/far/far-class.h fst/extensions/far/farlib.h \
fst/extensions/far/farscript.h fst/extensions/far/getters.h \
fst/extensions/far/info.h fst/extensions/far/isomorphic.h \
fst/extensions/far/print-strings.h fst/extensions/far/script-impl.h \
fst/extensions/far/stlist.h fst/extensions/far/sttable.h
endif
if HAVE_LINEAR
linear_include_headers = fst/extensions/linear/linear-fst-data-builder.h \
fst/extensions/linear/linear-fst-data.h fst/extensions/linear/linear-fst.h \
fst/extensions/linear/linearscript.h fst/extensions/linear/loglinear-apply.h \
fst/extensions/linear/trie.h
endif
if HAVE_MPDT
mpdt_include_headers = fst/extensions/mpdt/compose.h \
fst/extensions/mpdt/expand.h fst/extensions/mpdt/info.h \
fst/extensions/mpdt/mpdt.h fst/extensions/mpdt/mpdtlib.h \
fst/extensions/mpdt/mpdtscript.h fst/extensions/mpdt/read_write_utils.h \
fst/extensions/mpdt/reverse.h
endif
if HAVE_NGRAM
ngram_include_headers = fst/extensions/ngram/bitmap-index.h \
fst/extensions/ngram/ngram-fst.h fst/extensions/ngram/nthbit.h
endif
if HAVE_PDT
pdt_include_headers = fst/extensions/pdt/collection.h \
fst/extensions/pdt/compose.h fst/extensions/pdt/expand.h \
fst/extensions/pdt/getters.h fst/extensions/pdt/info.h \
fst/extensions/pdt/paren.h fst/extensions/pdt/pdt.h \
fst/extensions/pdt/pdtlib.h fst/extensions/pdt/pdtscript.h \
fst/extensions/pdt/replace.h fst/extensions/pdt/reverse.h \
fst/extensions/pdt/shortest-path.h
endif
if HAVE_SPECIAL
special_include_headers = fst/extensions/special/phi-fst.h \
fst/extensions/special/rho-fst.h fst/extensions/special/sigma-fst.h
endif
if HAVE_GRM
far_include_headers = fst/extensions/far/compile-strings.h \
fst/extensions/far/create.h fst/extensions/far/equal.h \
fst/extensions/far/extract.h fst/extensions/far/far.h \
fst/extensions/far/far-class.h fst/extensions/far/farlib.h \
fst/extensions/far/farscript.h fst/extensions/far/getters.h \
fst/extensions/far/info.h fst/extensions/far/isomorphic.h \
fst/extensions/far/print-strings.h fst/extensions/far/script-impl.h \
fst/extensions/far/stlist.h fst/extensions/far/sttable.h
mpdt_include_headers = fst/extensions/mpdt/compose.h \
fst/extensions/mpdt/expand.h fst/extensions/mpdt/info.h \
fst/extensions/mpdt/mpdt.h fst/extensions/mpdt/mpdtlib.h \
fst/extensions/mpdt/mpdtscript.h fst/extensions/mpdt/read_write_utils.h \
fst/extensions/mpdt/reverse.h
pdt_include_headers = fst/extensions/pdt/collection.h \
fst/extensions/pdt/compose.h fst/extensions/pdt/expand.h \
fst/extensions/pdt/getters.h fst/extensions/pdt/info.h \
fst/extensions/pdt/paren.h fst/extensions/pdt/pdt.h \
fst/extensions/pdt/pdtlib.h fst/extensions/pdt/pdtscript.h \
fst/extensions/pdt/replace.h fst/extensions/pdt/reverse.h \
fst/extensions/pdt/shortest-path.h
endif
script_include_headers = fst/script/arc-class.h \
fst/script/arciterator-class.h fst/script/arcsort.h \
fst/script/arg-packs.h fst/script/closure.h fst/script/compile-impl.h \
fst/script/compile.h fst/script/compose.h fst/script/concat.h \
fst/script/connect.h fst/script/convert.h fst/script/decode.h \
fst/script/determinize.h fst/script/difference.h fst/script/disambiguate.h \
fst/script/draw-impl.h fst/script/draw.h fst/script/encode.h \
fst/script/encodemapper-class.h fst/script/epsnormalize.h fst/script/equal.h \
fst/script/equivalent.h fst/script/fst-class.h fst/script/fstscript.h \
fst/script/getters.h fst/script/info-impl.h fst/script/info.h \
fst/script/intersect.h fst/script/invert.h fst/script/isomorphic.h \
fst/script/map.h fst/script/minimize.h fst/script/print-impl.h \
fst/script/print.h fst/script/project.h fst/script/prune.h \
fst/script/push.h fst/script/randequivalent.h fst/script/randgen.h \
fst/script/register.h fst/script/relabel.h fst/script/replace.h \
fst/script/reverse.h fst/script/reweight.h fst/script/rmepsilon.h \
fst/script/script-impl.h fst/script/shortest-distance.h \
fst/script/shortest-path.h fst/script/stateiterator-class.h \
fst/script/synchronize.h fst/script/text-io.h fst/script/topsort.h \
fst/script/union.h fst/script/weight-class.h fst/script/fstscript-decl.h \
fst/script/verify.h
nobase_include_HEADERS = fst/accumulator.h fst/add-on.h fst/arc-arena.h \
fst/arc-map.h fst/arc.h fst/arcfilter.h fst/arcsort.h fst/bi-table.h \
fst/cache.h fst/closure.h fst/compact-fst.h fst/compat.h fst/complement.h \
fst/compose-filter.h fst/compose.h fst/concat.h fst/config.h fst/connect.h \
fst/const-fst.h fst/determinize.h fst/dfs-visit.h fst/difference.h \
fst/disambiguate.h fst/edit-fst.h fst/encode.h fst/epsnormalize.h fst/equal.h \
fst/equivalent.h fst/expanded-fst.h fst/expectation-weight.h \
fst/factor-weight.h fst/filter-state.h fst/flags.h fst/float-weight.h \
fst/fst-decl.h fst/fst.h fst/fstlib.h fst/generic-register.h fst/heap.h \
fst/icu.h fst/intersect.h fst/interval-set.h fst/invert.h fst/isomorphic.h \
fst/label-reachable.h fst/lexicographic-weight.h fst/lock.h fst/log.h \
fst/lookahead-filter.h fst/lookahead-matcher.h fst/map.h fst/mapped-file.h \
fst/matcher-fst.h fst/matcher.h fst/memory.h fst/minimize.h fst/mutable-fst.h \
fst/pair-weight.h fst/partition.h fst/power-weight.h fst/product-weight.h \
fst/project.h fst/properties.h fst/prune.h fst/push.h fst/queue.h \
fst/randequivalent.h fst/randgen.h fst/rational.h fst/register.h \
fst/relabel.h fst/replace-util.h fst/replace.h fst/reverse.h fst/reweight.h \
fst/rmepsilon.h fst/rmfinalepsilon.h fst/set-weight.h fst/shortest-distance.h \
fst/shortest-path.h fst/signed-log-weight.h fst/sparse-power-weight.h \
fst/sparse-tuple-weight.h fst/state-map.h fst/state-reachable.h \
fst/state-table.h fst/statesort.h fst/string-weight.h fst/string.h \
fst/symbol-table-ops.h fst/symbol-table.h fst/synchronize.h \
fst/test-properties.h fst/topsort.h fst/tuple-weight.h fst/types.h \
fst/union-find.h fst/union-weight.h fst/union.h fst/util.h fst/vector-fst.h \
fst/verify.h fst/visit.h fst/weight.h \
$(compress_include_headers) \
$(far_include_headers) \
$(linear_include_headers) \
$(mpdt_include_headers) \
$(ngram_include_headers) \
$(pdt_include_headers) \
$(script_include_headers) \
$(special_include_headers)
| 0 |
coqui_public_repos/STT-models/hakha-chin/itml | coqui_public_repos/STT-models/hakha-chin/itml/v0.1.0/MODEL_CARD.md | # Model card for Hakha Chin STT
Jump to section:
- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)
## Model details
- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Hakha Chin / Hakha Lai / `cnh`
- Model date: April 9, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.0`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{hakhachin-stt, author = {Tyers, Francis}, title = {Hakha Chin STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-CNH-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).
## Intended use
Speech-to-Text for the [Hakha Chin Language](https://en.wikipedia.org/wiki/Hakha_Chin_language) on 16kHz, mono-channel audio.
## Performance Factors
Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
## Metrics
STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.
#### Transcription Accuracy
The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/cnh/).
|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|77.8\%|32.1\%|
#### Real-Time Factor
Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.
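For example, if it takes 5 seconds of processing time to transcribe 10 seconds of audio, the RTF is 5 / 10 = 0.5.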
Recorded average RTF on laptop CPU: ``
#### Model Size
`model.pbmm`: 181M
`model.tflite`: 46M
### Approaches to uncertainty and variability
Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.
## Training data
This model was trained on Common Voice 6.1 train.
## Evaluation data
The Model was evaluated on Common Voice 6.1 test.
## Ethical considerations
Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.
### Demographic Bias
You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.
### Surveillance
Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.
## Caveats and recommendations
Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).
In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/linear/Makefile.in | # Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
@HAVE_BIN_TRUE@bin_PROGRAMS = fstlinear$(EXEEXT) \
@HAVE_BIN_TRUE@ fstloglinearapply$(EXEEXT)
subdir = src/extensions/linear
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/depcomp
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libfstdir)" \
"$(DESTDIR)$(bindir)"
LTLIBRARIES = $(lib_LTLIBRARIES) $(libfst_LTLIBRARIES)
am__DEPENDENCIES_1 =
@HAVE_SCRIPT_TRUE@libfstlinearscript_la_DEPENDENCIES = \
@HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \
@HAVE_SCRIPT_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
am__libfstlinearscript_la_SOURCES_DIST = linearscript.cc
@HAVE_SCRIPT_TRUE@am_libfstlinearscript_la_OBJECTS = linearscript.lo
libfstlinearscript_la_OBJECTS = $(am_libfstlinearscript_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
libfstlinearscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(libfstlinearscript_la_LDFLAGS) \
$(LDFLAGS) -o $@
@HAVE_SCRIPT_TRUE@am_libfstlinearscript_la_rpath = -rpath $(libdir)
linear_classifier_fst_la_LIBADD =
am_linear_classifier_fst_la_OBJECTS = linear-classifier-fst.lo
linear_classifier_fst_la_OBJECTS = \
$(am_linear_classifier_fst_la_OBJECTS)
linear_classifier_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(linear_classifier_fst_la_LDFLAGS) \
$(LDFLAGS) -o $@
@HAVE_SCRIPT_FALSE@am_linear_classifier_fst_la_rpath = -rpath \
@HAVE_SCRIPT_FALSE@ $(libfstdir)
@HAVE_SCRIPT_TRUE@am_linear_classifier_fst_la_rpath = -rpath \
@HAVE_SCRIPT_TRUE@ $(libfstdir)
linear_tagger_fst_la_LIBADD =
am_linear_tagger_fst_la_OBJECTS = linear-tagger-fst.lo
linear_tagger_fst_la_OBJECTS = $(am_linear_tagger_fst_la_OBJECTS)
linear_tagger_fst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
$(AM_CXXFLAGS) $(CXXFLAGS) $(linear_tagger_fst_la_LDFLAGS) \
$(LDFLAGS) -o $@
@HAVE_SCRIPT_FALSE@am_linear_tagger_fst_la_rpath = -rpath $(libfstdir)
@HAVE_SCRIPT_TRUE@am_linear_tagger_fst_la_rpath = -rpath $(libfstdir)
PROGRAMS = $(bin_PROGRAMS)
am__fstlinear_SOURCES_DIST = fstlinear.cc
@HAVE_BIN_TRUE@am_fstlinear_OBJECTS = fstlinear.$(OBJEXT)
fstlinear_OBJECTS = $(am_fstlinear_OBJECTS)
fstlinear_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstlinear_DEPENDENCIES = libfstlinearscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
am__fstloglinearapply_SOURCES_DIST = fstloglinearapply.cc
@HAVE_BIN_TRUE@am_fstloglinearapply_OBJECTS = \
@HAVE_BIN_TRUE@ fstloglinearapply.$(OBJEXT)
fstloglinearapply_OBJECTS = $(am_fstloglinearapply_OBJECTS)
fstloglinearapply_LDADD = $(LDADD)
@HAVE_BIN_TRUE@fstloglinearapply_DEPENDENCIES = libfstlinearscript.la \
@HAVE_BIN_TRUE@ ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1)
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES =
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CXXFLAGS) $(CXXFLAGS)
AM_V_CXX = $(am__v_CXX_@AM_V@)
am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
am__v_CXX_0 = @echo " CXX " $@;
am__v_CXX_1 =
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
am__v_CXXLD_0 = @echo " CXXLD " $@;
am__v_CXXLD_1 =
SOURCES = $(libfstlinearscript_la_SOURCES) \
$(linear_classifier_fst_la_SOURCES) \
$(linear_tagger_fst_la_SOURCES) $(fstlinear_SOURCES) \
$(fstloglinearapply_SOURCES)
DIST_SOURCES = $(am__libfstlinearscript_la_SOURCES_DIST) \
$(linear_classifier_fst_la_SOURCES) \
$(linear_tagger_fst_la_SOURCES) $(am__fstlinear_SOURCES_DIST) \
$(am__fstloglinearapply_SOURCES_DIST)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
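# For example, the input lines "a", "b", "a" come out as "a" and "b", each
# printed exactly once and in no guaranteed order.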
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS)
@HAVE_BIN_TRUE@LDADD = libfstlinearscript.la ../../script/libfstscript.la \
@HAVE_BIN_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS)
@HAVE_BIN_TRUE@fstlinear_SOURCES = fstlinear.cc
@HAVE_BIN_TRUE@fstloglinearapply_SOURCES = fstloglinearapply.cc
@HAVE_SCRIPT_TRUE@libfstlinearscript_la_SOURCES = linearscript.cc
@HAVE_SCRIPT_TRUE@libfstlinearscript_la_LDFLAGS = -version-info 10:0:0 -lm $(DL_LIBS)
@HAVE_SCRIPT_TRUE@libfstlinearscript_la_LIBADD = ../../script/libfstscript.la \
@HAVE_SCRIPT_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS)
@HAVE_SCRIPT_FALSE@libfst_LTLIBRARIES = linear_tagger-fst.la linear_classifier-fst.la
@HAVE_SCRIPT_TRUE@libfst_LTLIBRARIES = linear_tagger-fst.la \
@HAVE_SCRIPT_TRUE@ linear_classifier-fst.la
@HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstlinearscript.la
linear_tagger_fst_la_SOURCES = linear-tagger-fst.cc
linear_tagger_fst_la_LDFLAGS = -module
linear_classifier_fst_la_SOURCES = linear-classifier-fst.cc
linear_classifier_fst_la_LDFLAGS = -module
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/linear/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/extensions/linear/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
}
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
done
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
@list='$(lib_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
install-libfstLTLIBRARIES: $(libfst_LTLIBRARIES)
@$(NORMAL_INSTALL)
@list='$(libfst_LTLIBRARIES)'; test -n "$(libfstdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
echo " $(MKDIR_P) '$(DESTDIR)$(libfstdir)'"; \
$(MKDIR_P) "$(DESTDIR)$(libfstdir)" || exit 1; \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libfstdir)'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libfstdir)"; \
}
uninstall-libfstLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(libfst_LTLIBRARIES)'; test -n "$(libfstdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libfstdir)/$$f'"; \
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libfstdir)/$$f"; \
done
clean-libfstLTLIBRARIES:
-test -z "$(libfst_LTLIBRARIES)" || rm -f $(libfst_LTLIBRARIES)
@list='$(libfst_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
libfstlinearscript.la: $(libfstlinearscript_la_OBJECTS) $(libfstlinearscript_la_DEPENDENCIES) $(EXTRA_libfstlinearscript_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libfstlinearscript_la_LINK) $(am_libfstlinearscript_la_rpath) $(libfstlinearscript_la_OBJECTS) $(libfstlinearscript_la_LIBADD) $(LIBS)
linear_classifier-fst.la: $(linear_classifier_fst_la_OBJECTS) $(linear_classifier_fst_la_DEPENDENCIES) $(EXTRA_linear_classifier_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(linear_classifier_fst_la_LINK) $(am_linear_classifier_fst_la_rpath) $(linear_classifier_fst_la_OBJECTS) $(linear_classifier_fst_la_LIBADD) $(LIBS)
linear_tagger-fst.la: $(linear_tagger_fst_la_OBJECTS) $(linear_tagger_fst_la_DEPENDENCIES) $(EXTRA_linear_tagger_fst_la_DEPENDENCIES)
$(AM_V_CXXLD)$(linear_tagger_fst_la_LINK) $(am_linear_tagger_fst_la_rpath) $(linear_tagger_fst_la_OBJECTS) $(linear_tagger_fst_la_LIBADD) $(LIBS)
install-binPROGRAMS: $(bin_PROGRAMS)
@$(NORMAL_INSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
if test -n "$$list"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
$(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
fi; \
for p in $$list; do echo "$$p $$p"; done | \
sed 's/$(EXEEXT)$$//' | \
while read p p1; do if test -f $$p \
|| test -f $$p1 \
; then echo "$$p"; echo "$$p"; else :; fi; \
done | \
sed -e 'p;s,.*/,,;n;h' \
-e 's|.*|.|' \
-e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
{ d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
if ($$2 == $$4) files[d] = files[d] " " $$1; \
else { print "f", $$3 "/" $$4, $$1; } } \
END { for (d in files) print "f", d, files[d] }' | \
while read type dir files; do \
if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
test -z "$$files" || { \
echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
$(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
} \
; done
uninstall-binPROGRAMS:
@$(NORMAL_UNINSTALL)
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
-e 's/$$/$(EXEEXT)/' \
`; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(bindir)" && rm -f $$files
clean-binPROGRAMS:
@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
echo " rm -f" $$list; \
rm -f $$list || exit $$?; \
test -n "$(EXEEXT)" || exit 0; \
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
fstlinear$(EXEEXT): $(fstlinear_OBJECTS) $(fstlinear_DEPENDENCIES) $(EXTRA_fstlinear_DEPENDENCIES)
@rm -f fstlinear$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstlinear_OBJECTS) $(fstlinear_LDADD) $(LIBS)
fstloglinearapply$(EXEEXT): $(fstloglinearapply_OBJECTS) $(fstloglinearapply_DEPENDENCIES) $(EXTRA_fstloglinearapply_DEPENDENCIES)
@rm -f fstloglinearapply$(EXEEXT)
$(AM_V_CXXLD)$(CXXLINK) $(fstloglinearapply_OBJECTS) $(fstloglinearapply_LDADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstlinear.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fstloglinearapply.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/linear-classifier-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/linear-tagger-fst.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/linearscript.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
.cc.obj:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.cc.lo:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
install-binPROGRAMS: install-libLTLIBRARIES
installdirs:
for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libfstdir)" "$(DESTDIR)$(bindir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libfstLTLIBRARIES clean-libtool mostlyclean-am
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am: install-libfstLTLIBRARIES
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am: install-binPROGRAMS install-libLTLIBRARIES
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES \
uninstall-libfstLTLIBRARIES
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
clean-libfstLTLIBRARIES clean-libtool cscopelist-am ctags \
ctags-am distclean distclean-compile distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-binPROGRAMS \
install-data install-data-am install-dvi install-dvi-am \
install-exec install-exec-am install-html install-html-am \
install-info install-info-am install-libLTLIBRARIES \
install-libfstLTLIBRARIES install-man install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \
uninstall-libLTLIBRARIES uninstall-libfstLTLIBRARIES
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm | coqui_public_repos/inference-engine/third_party/kenlm/lm/bhiksha.cc | #include "lm/bhiksha.hh"
#include "lm/binary_format.hh"
#include "lm/config.hh"
#include "util/file.hh"
#include "util/exception.hh"
#include <limits>
namespace lm {
namespace ngram {
namespace trie {
DontBhiksha::DontBhiksha(const void * /*base*/, uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) :
next_(util::BitsMask::ByMax(max_next)) {}
const uint8_t kArrayBhikshaVersion = 0;
// TODO: put this in binary file header instead when I change the binary file format again.
void ArrayBhiksha::UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config) {
uint8_t buffer[2];
file.ReadForConfig(buffer, 2, offset);
uint8_t version = buffer[0];
uint8_t configured_bits = buffer[1];
if (version != kArrayBhikshaVersion) UTIL_THROW(FormatLoadException, "This file has sorted array compression version " << (unsigned) version << " but the code expects version " << (unsigned)kArrayBhikshaVersion);
config.pointer_bhiksha_bits = configured_bits;
}
namespace {
// Find argmin_{chopped \in [0, RequiredBits(max_next)]} ChoppedDelta(max_offset)
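// Chopping `chop` high bits from every inline pointer saves
// max_offset * chop bits overall but requires an offset table of roughly
// (max_next >> (required - chop)) 64-bit entries; ChopBits picks the chop
// count with the lowest net cost (table bits minus saved bits).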
uint8_t ChopBits(uint64_t max_offset, uint64_t max_next, const Config &config) {
uint8_t required = util::RequiredBits(max_next);
uint8_t best_chop = 0;
int64_t lowest_change = std::numeric_limits<int64_t>::max();
// There are probably faster ways but I don't care because this is only done once per order at construction time.
for (uint8_t chop = 0; chop <= std::min(required, config.pointer_bhiksha_bits); ++chop) {
int64_t change = (max_next >> (required - chop)) * 64 /* table cost in bits */
- max_offset * static_cast<int64_t>(chop); /* savings in bits*/
if (change < lowest_change) {
lowest_change = change;
best_chop = chop;
}
}
return best_chop;
}
std::size_t ArrayCount(uint64_t max_offset, uint64_t max_next, const Config &config) {
uint8_t required = util::RequiredBits(max_next);
uint8_t chopping = ChopBits(max_offset, max_next, config);
return (max_next >> (required - chopping)) + 1 /* we store 0 too */;
}
} // namespace
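// Memory layout: one 64-bit header word (a version byte and a
// pointer_bhiksha_bits byte, filled in by FinishedLoading), then ArrayCount()
// 64-bit offset entries, plus up to 7 bytes of slack for 8-byte alignment.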
uint64_t ArrayBhiksha::Size(uint64_t max_offset, uint64_t max_next, const Config &config) {
return sizeof(uint64_t) * (1 /* header */ + ArrayCount(max_offset, max_next, config)) + 7 /* 8-byte alignment */;
}
uint8_t ArrayBhiksha::InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config) {
return util::RequiredBits(max_next) - ChopBits(max_offset, max_next, config);
}
namespace {
void *AlignTo8(void *from) {
uint8_t *val = reinterpret_cast<uint8_t*>(from);
std::size_t remainder = reinterpret_cast<std::size_t>(val) & 7;
if (!remainder) return val;
return val + 8 - remainder;
}
} // namespace
ArrayBhiksha::ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_next, const Config &config)
: next_inline_(util::BitsMask::ByBits(InlineBits(max_offset, max_next, config))),
offset_begin_(reinterpret_cast<const uint64_t*>(AlignTo8(base)) + 1 /* 8-byte header */),
offset_end_(offset_begin_ + ArrayCount(max_offset, max_next, config)),
write_to_(reinterpret_cast<uint64_t*>(AlignTo8(base)) + 1 /* 8-byte header */ + 1 /* first entry is 0 */),
original_base_(base) {}
void ArrayBhiksha::FinishedLoading(const Config &config) {
// *offset_begin_ = 0 but without a const_cast.
*(write_to_ - (write_to_ - offset_begin_)) = 0;
if (write_to_ != offset_end_) UTIL_THROW(util::Exception, "Did not get all the array entries that were expected.");
uint8_t *head_write = reinterpret_cast<uint8_t*>(original_base_);
*(head_write++) = kArrayBhikshaVersion;
*(head_write++) = config.pointer_bhiksha_bits;
}
} // namespace trie
} // namespace ngram
} // namespace lm
| 0 |
coqui_public_repos/STT-examples/django_api_streaming/stt_app | coqui_public_repos/STT-examples/django_api_streaming/stt_app/config/config.json | {
"stt": {
"model": "/share/STT/models/output_graph.tflite",
"lm": "/share/STT/models/kenlm.scorer",
"audiofiledir": "/tmp/tmp_audio",
"audiofilelength": "10",
"debug": "1"
}
} | 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/symbol-table.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes to provide symbol-to-integer and integer-to-symbol mappings.
#ifndef FST_SYMBOL_TABLE_H_
#define FST_SYMBOL_TABLE_H_
#include <functional>
#include <ios>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include <fst/compat.h>
#include <fst/flags.h>
#include <fst/log.h>
#include <fstream>
#include <map>
DECLARE_bool(fst_compat_symbols);
namespace fst {
constexpr int64_t kNoSymbol = -1;
// WARNING: Reading via symbol table read options should
// not be used. This is a temporary workaround for
// reading symbol ranges of previously stored symbol sets.
struct SymbolTableReadOptions {
SymbolTableReadOptions() {}
SymbolTableReadOptions(
std::vector<std::pair<int64_t, int64_t>> string_hash_ranges,
const string &source)
: string_hash_ranges(std::move(string_hash_ranges)), source(source) {}
std::vector<std::pair<int64_t, int64_t>> string_hash_ranges;
string source;
};
struct SymbolTableTextOptions {
explicit SymbolTableTextOptions(bool allow_negative_labels = false);
bool allow_negative_labels;
string fst_field_separator;
};
namespace internal {
// List of symbols with a dense hash for looking up symbol index, rehashing at
// 75% occupancy.
class DenseSymbolMap {
public:
DenseSymbolMap();
DenseSymbolMap(const DenseSymbolMap &x);
std::pair<int64_t, bool> InsertOrFind(const string &key);
int64_t Find(const string &key) const;
size_t Size() const { return symbols_.size(); }
const string &GetSymbol(size_t idx) const { return symbols_[idx]; }
void RemoveSymbol(size_t idx);
private:
// num_buckets must be a power of 2.
void Rehash(size_t num_buckets);
int64_t empty_;
std::vector<string> symbols_;
std::hash<string> str_hash_;
std::vector<int64_t> buckets_;
uint64_t hash_mask_;
};
class SymbolTableImpl {
public:
explicit SymbolTableImpl(const string &name)
: name_(name),
available_key_(0),
dense_key_limit_(0),
check_sum_finalized_(false) {}
SymbolTableImpl(const SymbolTableImpl &impl)
: name_(impl.name_),
available_key_(impl.available_key_),
dense_key_limit_(impl.dense_key_limit_),
symbols_(impl.symbols_),
idx_key_(impl.idx_key_),
key_map_(impl.key_map_),
check_sum_finalized_(false) {}
int64_t AddSymbol(const string &symbol, int64_t key);
int64_t AddSymbol(const string &symbol) {
return AddSymbol(symbol, available_key_);
}
// Removes the symbol with the given key. The removal is costly
// (O(NumSymbols)) and may reduce the efficiency of Find() because of a
// potentially reduced size of the dense key interval.
void RemoveSymbol(int64_t key);
static SymbolTableImpl *ReadText(
std::istream &strm, const string &name,
const SymbolTableTextOptions &opts = SymbolTableTextOptions());
static SymbolTableImpl* Read(std::istream &strm,
const SymbolTableReadOptions &opts);
bool Write(std::ostream &strm) const;
// Return the string associated with the key. If the key is out of
// range (<0, >max), return an empty string.
string Find(int64_t key) const {
int64_t idx = key;
if (key < 0 || key >= dense_key_limit_) {
const auto it = key_map_.find(key);
if (it == key_map_.end()) return "";
idx = it->second;
}
if (idx < 0 || idx >= symbols_.Size()) return "";
return symbols_.GetSymbol(idx);
}
// Returns the key associated with the symbol; if the symbol
// does not exist, returns kNoSymbol.
int64_t Find(const string &symbol) const {
int64_t idx = symbols_.Find(symbol);
if (idx == kNoSymbol || idx < dense_key_limit_) return idx;
return idx_key_[idx - dense_key_limit_];
}
bool Member(int64_t key) const { return !Find(key).empty(); }
bool Member(const string &symbol) const { return Find(symbol) != kNoSymbol; }
int64_t GetNthKey(std::ptrdiff_t pos) const {
if (pos < 0 || pos >= symbols_.Size()) return kNoSymbol;
if (pos < dense_key_limit_) return pos;
return Find(symbols_.GetSymbol(pos));
}
const string &Name() const { return name_; }
void SetName(const string &new_name) { name_ = new_name; }
const string &CheckSum() const {
MaybeRecomputeCheckSum();
return check_sum_string_;
}
const string &LabeledCheckSum() const {
MaybeRecomputeCheckSum();
return labeled_check_sum_string_;
}
int64_t AvailableKey() const { return available_key_; }
size_t NumSymbols() const { return symbols_.Size(); }
private:
// Recomputes the checksums (both of them) if we've had changes since the last
// computation (i.e., if check_sum_finalized_ is false).
// Takes ~2.5 microseconds (dbg) or ~230 nanoseconds (opt) on a 2.67GHz Xeon
// if the checksum is up-to-date (requiring no recomputation).
void MaybeRecomputeCheckSum() const;
string name_;
int64_t available_key_;
int64_t dense_key_limit_;
DenseSymbolMap symbols_;
// Maps index to key for index >= dense_key_limit:
// key = idx_key_[index - dense_key_limit]
std::vector<int64_t> idx_key_;
// Maps key to index for key >= dense_key_limit_.
// index = key_map_[key]
map<int64_t, int64_t> key_map_;
mutable bool check_sum_finalized_;
mutable string check_sum_string_;
mutable string labeled_check_sum_string_;
mutable Mutex check_sum_mutex_;
};
} // namespace internal
// Symbol (string) to integer (and reverse) mapping.
//
// The SymbolTable implements the mappings of labels to strings and reverse.
// SymbolTables are used to describe the alphabet of the input and output
// labels for arcs in a Finite State Transducer.
//
// SymbolTables are reference-counted and can therefore be shared across
// multiple machines. For example, a language model grammar G with a
// SymbolTable for the words in the language model can share this symbol
// table with the lexical representation L o G.
class SymbolTable {
public:
// Constructs symbol table with an optional name.
explicit SymbolTable(const string &name = "<unspecified>")
: impl_(std::make_shared<internal::SymbolTableImpl>(name)) {}
virtual ~SymbolTable() {}
// Reads a text representation of the symbol table from an istream. Pass a
// name to give the resulting SymbolTable.
static SymbolTable *ReadText(
std::istream &strm, const string &name,
const SymbolTableTextOptions &opts = SymbolTableTextOptions()) {
auto *impl = internal::SymbolTableImpl::ReadText(strm, name, opts);
return impl ? new SymbolTable(impl) : nullptr;
}
// Reads a text representation of the symbol table.
static SymbolTable *ReadText(const string &filename,
const SymbolTableTextOptions &opts = SymbolTableTextOptions()) {
std::ifstream strm(filename, std::ios_base::in);
if (!strm.good()) {
LOG(ERROR) << "SymbolTable::ReadText: Can't open file " << filename;
return nullptr;
}
return ReadText(strm, filename, opts);
}
// WARNING: Reading via symbol table read options should not be used. This is
// a temporary workaround.
static SymbolTable* Read(std::istream &strm,
const SymbolTableReadOptions &opts) {
auto *impl = internal::SymbolTableImpl::Read(strm, opts);
return (impl) ? new SymbolTable(impl) : nullptr;
}
// Reads a binary dump of the symbol table from a stream.
static SymbolTable *Read(std::istream &strm,
const string &source) {
SymbolTableReadOptions opts;
opts.source = source;
return Read(strm, opts);
}
// Reads a binary dump of the symbol table.
static SymbolTable *Read(const string& filename) {
std::ifstream strm(filename,
std::ios_base::in | std::ios_base::binary);
if (!strm.good()) {
LOG(ERROR) << "SymbolTable::Read: Can't open file " << filename;
return nullptr;
}
return Read(strm, filename);
}
// DERIVABLE INTERFACE
// Creates a reference counted copy.
virtual SymbolTable *Copy() const { return new SymbolTable(*this); }
// Adds a symbol with the given key to the table. A symbol table also keeps track of
// the last available key (highest key value in the symbol table).
virtual int64_t AddSymbol(const string &symbol, int64_t key) {
MutateCheck();
return impl_->AddSymbol(symbol, key);
}
// Adds a symbol to the table. The associated value key is automatically
// assigned by the symbol table.
virtual int64_t AddSymbol(const string &symbol) {
MutateCheck();
return impl_->AddSymbol(symbol);
}
// Adds another symbol table to this table. All key values will be offset
// by the current available key (highest key value in the symbol table).
// Note string symbols with the same key value will still have the same
// key value after the symbol table has been merged, but a different
// value. Adding symbol tables does not result in changes in the base table.
virtual void AddTable(const SymbolTable &table);
// Returns the current available key (i.e., highest key + 1) in the symbol
// table.
virtual int64_t AvailableKey() const { return impl_->AvailableKey(); }
// Return the label-agnostic MD5 check-sum for this table. All new symbols
// added to the table will result in an updated checksum. Deprecated.
virtual const string &CheckSum() const { return impl_->CheckSum(); }
virtual int64_t GetNthKey(std::ptrdiff_t pos) const { return impl_->GetNthKey(pos); }
// Returns the string associated with the key; if the key is out of
// range (<0, >max), returns an empty string.
virtual string Find(int64_t key) const { return impl_->Find(key); }
// Returns the key associated with the symbol; if the symbol does not exist,
// kNoSymbol is returned.
virtual int64_t Find(const string &symbol) const { return impl_->Find(symbol); }
// Same as CheckSum(), but returns a label-dependent version.
virtual const string &LabeledCheckSum() const {
return impl_->LabeledCheckSum();
}
virtual bool Member(int64_t key) const { return impl_->Member(key); }
virtual bool Member(const string &symbol) const {
return impl_->Member(symbol);
}
// Returns the name of the symbol table.
virtual const string &Name() const { return impl_->Name(); }
// Returns the current number of symbols in table (not necessarily equal to
// AvailableKey()).
virtual size_t NumSymbols() const { return impl_->NumSymbols(); }
virtual void RemoveSymbol(int64_t key) {
MutateCheck();
return impl_->RemoveSymbol(key);
}
// Sets the name of the symbol table.
virtual void SetName(const string &new_name) {
MutateCheck();
impl_->SetName(new_name);
}
virtual bool Write(std::ostream &strm) const { return impl_->Write(strm); }
virtual bool Write(const string &filename) const {
std::ofstream strm(filename,
std::ios_base::out | std::ios_base::binary);
if (!strm.good()) {
LOG(ERROR) << "SymbolTable::Write: Can't open file " << filename;
return false;
}
return Write(strm);
}
// Dump a text representation of the symbol table via a stream.
virtual bool WriteText(std::ostream &strm,
const SymbolTableTextOptions &opts = SymbolTableTextOptions()) const;
// Dump a text representation of the symbol table.
virtual bool WriteText(const string &filename) const {
std::ofstream strm(filename);
if (!strm.good()) {
LOG(ERROR) << "SymbolTable::WriteText: Can't open file " << filename;
return false;
}
return WriteText(strm);
}
private:
explicit SymbolTable(internal::SymbolTableImpl *impl) : impl_(impl) {}
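// Copy-on-write support: called before any mutation so a shared
// implementation is cloned first, leaving other SymbolTable copies intact.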
void MutateCheck() {
if (!impl_.unique()) impl_.reset(new internal::SymbolTableImpl(*impl_));
}
const internal::SymbolTableImpl *Impl() const { return impl_.get(); }
private:
std::shared_ptr<internal::SymbolTableImpl> impl_;
};
// Iterator class for symbols in a symbol table.
class SymbolTableIterator {
public:
explicit SymbolTableIterator(const SymbolTable &table)
: table_(table),
pos_(0),
nsymbols_(table.NumSymbols()),
key_(table.GetNthKey(0)) {}
~SymbolTableIterator() {}
// Returns whether iterator is done.
bool Done() const { return (pos_ == nsymbols_); }
// Return the key of the current symbol.
int64_t Value() const { return key_; }
// Return the string of the current symbol.
string Symbol() const { return table_.Find(key_); }
// Advances iterator.
void Next() {
++pos_;
if (pos_ < nsymbols_) key_ = table_.GetNthKey(pos_);
}
// Resets iterator.
void Reset() {
pos_ = 0;
key_ = table_.GetNthKey(0);
}
private:
const SymbolTable &table_;
std::ptrdiff_t pos_;
size_t nsymbols_;
int64_t key_;
};
// Relabels a symbol table as specified by the input vector of pairs
// (old label, new label). The new symbol table only retains symbols
// for which a relabeling is explicitly specified.
//
// TODO(allauzen): consider adding options to allow for some form of implicit
// identity relabeling.
template <class Label>
SymbolTable *RelabelSymbolTable(const SymbolTable *table,
const std::vector<std::pair<Label, Label>> &pairs) {
auto *new_table = new SymbolTable(
table->Name().empty() ? string()
: (string("relabeled_") + table->Name()));
for (const auto &pair : pairs) {
new_table->AddSymbol(table->Find(pair.first), pair.second);
}
return new_table;
}
// Returns true if the two symbol tables have equal checksums. Passing in
// nullptr for either table always returns true.
bool CompatSymbols(const SymbolTable *syms1, const SymbolTable *syms2,
bool warning = true);
// Symbol Table serialization.
void SymbolTableToString(const SymbolTable *table, string *result);
SymbolTable *StringToSymbolTable(const string &str);
} // namespace fst
#endif // FST_SYMBOL_TABLE_H_
| 0 |
coqui_public_repos/STT/native_client/swift | coqui_public_repos/STT/native_client/swift/stt_ios_test/SpeechRecognitionImpl.swift | //
// SpeechRecognitionImpl.swift
// stt_ios_test
//
// Created by Erik Ziegler on 27.07.20.
// Copyright © 2020 Mozilla
// Copyright © 2020 Erik Ziegler
// Copyright © 2021 Coqui GmbH
import Foundation
import AVFoundation
import Accelerate
import stt_ios
class SpeechRecognitionImpl {
private var model: STTModel
private var stream: STTStream?
// The interval in which audio data is read from
// the buffer queue and fed into the model.
// Should be slightly higher than [AudioInput.processingIntervalInMillis].
private let modelFeedIntervalInMillis = 100
// The interval in which the model passes data through the decoder.
// Should be slightly above timestep length (20 ms) * number of timestamps in a block (default is 16).
private let decodeIntervalInMillis = 350
private var audioData = Data()
private var feedTimer: Timer? = nil
private var decodeTimer: Timer? = nil
private var audioInput: AudioInput? = nil
private var bufferQueue: [[Int16]] = [[Int16]]()
private let onPartialResult: (String) -> Void
private let onResult: (String) -> Void
init(onPartialResult:@escaping (String) -> Void, onResult:@escaping (String) -> Void) {
let modelPath = Bundle.main.path(forResource: "model", ofType: "tflite")!
let scorerPath = Bundle.main.path(forResource: "huge-vocab", ofType: "scorer")!
model = try! STTModel(modelPath: modelPath)
try! model.enableExternalScorer(scorerPath: scorerPath)
self.onPartialResult = onPartialResult
self.onResult = onResult
}
public func startMicrophoneRecognition() {
audioData = Data()
stream = try! model.createStream()
audioInput = AudioInput() { shorts in
self.bufferQueue.append(shorts)
}
print("Started listening...")
audioInput!.start()
feedTimer = Timer.scheduledTimer(
withTimeInterval: Double(modelFeedIntervalInMillis) / 1000.0,
repeats: true
) { _ in
if (!self.bufferQueue.isEmpty) {
let shorts = self.bufferQueue.removeFirst()
self.stream!.feedAudioContent(buffer: shorts)
// (optional) collect audio data for writing to file
shorts.withUnsafeBufferPointer { buffPtr in
self.audioData.append(buffPtr)
}
}
}
decodeTimer = Timer.scheduledTimer(
withTimeInterval: Double(decodeIntervalInMillis) / 1000.0,
repeats: true
) { _ in
// (optional) get partial result
let partialResult = self.stream!.intermediateDecode()
self.onPartialResult(partialResult)
}
}
public func stopMicrophoneRecognition() {
audioInput!.stop()
feedTimer!.invalidate()
feedTimer = nil
decodeTimer!.invalidate()
decodeTimer = nil
bufferQueue.removeAll()
let result = stream?.finishStream() ?? ""
onResult(result)
// (optional) useful for checking the recorded audio
writeAudioDataToPCMFile()
}
private func writeAudioDataToPCMFile() {
let documents = NSSearchPathForDirectoriesInDomains(FileManager.SearchPathDirectory.documentDirectory, FileManager.SearchPathDomainMask.userDomainMask, true)[0]
let filePath = documents + "/recording.pcm"
let url = URL(fileURLWithPath: filePath)
try! audioData.write(to: url)
print("Saved audio to " + filePath)
}
// MARK: Audio file recognition
private func render(audioContext: AudioContext?, stream: STTStream) {
guard let audioContext = audioContext else {
fatalError("Couldn't create the audioContext")
}
let sampleRange: CountableRange<Int> = 0..<audioContext.totalSamples
guard let reader = try? AVAssetReader(asset: audioContext.asset)
else {
fatalError("Couldn't initialize the AVAssetReader")
}
reader.timeRange = CMTimeRange(start: CMTime(value: Int64(sampleRange.lowerBound), timescale: audioContext.asset.duration.timescale),
duration: CMTime(value: Int64(sampleRange.count), timescale: audioContext.asset.duration.timescale))
let outputSettingsDict: [String : Any] = [
AVFormatIDKey: Int(kAudioFormatLinearPCM),
AVLinearPCMBitDepthKey: 16,
AVLinearPCMIsBigEndianKey: false,
AVLinearPCMIsFloatKey: false,
AVLinearPCMIsNonInterleaved: false
]
let readerOutput = AVAssetReaderTrackOutput(track: audioContext.assetTrack,
outputSettings: outputSettingsDict)
readerOutput.alwaysCopiesSampleData = false
reader.add(readerOutput)
var sampleBuffer = Data()
// 16-bit samples
reader.startReading()
defer { reader.cancelReading() }
while reader.status == .reading {
guard let readSampleBuffer = readerOutput.copyNextSampleBuffer(),
let readBuffer = CMSampleBufferGetDataBuffer(readSampleBuffer) else {
break
}
// Append audio sample buffer into our current sample buffer
var readBufferLength = 0
var readBufferPointer: UnsafeMutablePointer<Int8>?
CMBlockBufferGetDataPointer(readBuffer,
atOffset: 0,
lengthAtOffsetOut: &readBufferLength,
totalLengthOut: nil,
dataPointerOut: &readBufferPointer)
sampleBuffer.append(UnsafeBufferPointer(start: readBufferPointer, count: readBufferLength))
CMSampleBufferInvalidate(readSampleBuffer)
let totalSamples = sampleBuffer.count / MemoryLayout<Int16>.size
print("read \(totalSamples) samples")
sampleBuffer.withUnsafeBytes { (samples: UnsafeRawBufferPointer) in
let unsafeBufferPointer = samples.bindMemory(to: Int16.self)
stream.feedAudioContent(buffer: unsafeBufferPointer)
}
sampleBuffer.removeAll()
}
// if (reader.status == AVAssetReaderStatusFailed || reader.status == AVAssetReaderStatusUnknown)
guard reader.status == .completed else {
fatalError("Couldn't read the audio file")
}
}
private func recognizeFile(audioPath: String, completion: @escaping () -> ()) {
let url = URL(fileURLWithPath: audioPath)
let stream = try! model.createStream()
print("\(audioPath)")
let start = CFAbsoluteTimeGetCurrent()
AudioContext.load(fromAudioURL: url, completionHandler: { audioContext in
guard let audioContext = audioContext else {
fatalError("Couldn't create the audioContext")
}
self.render(audioContext: audioContext, stream: stream)
let result = stream.finishStream()
let end = CFAbsoluteTimeGetCurrent()
print("\"\(audioPath)\": \(end - start) - \(result)")
completion()
})
}
public func recognizeFiles() {
// Add file names (without extension) here if you want to test recognition from files.
// Remember to add them to the project under Copy Bundle Resources.
let files: [String] = []
let serialQueue = DispatchQueue(label: "serialQueue")
let group = DispatchGroup()
group.enter()
if let first = files.first {
serialQueue.async {
self.recognizeFile(audioPath: Bundle.main.path(forResource: first, ofType: "wav")!) {
group.leave()
}
}
}
for path in files.dropFirst() {
group.wait()
group.enter()
self.recognizeFile(audioPath: Bundle.main.path(forResource: path, ofType: "wav")!) {
group.leave()
}
}
}
}
| 0 |
coqui_public_repos/STT-examples/python_websocket_server | coqui_public_repos/STT-examples/python_websocket_server/stt_server/engine.py | import wave
from io import BytesIO
import ffmpeg
import numpy as np
from stt import Model
def normalize_audio(audio):
out, err = (
ffmpeg.input("pipe:0")
.output(
"pipe:1",
f="WAV",
acodec="pcm_s16le",
ac=1,
ar="16k",
loglevel="error",
hide_banner=None,
)
.run(input=audio, capture_stdout=True, capture_stderr=True)
)
if err:
raise Exception(err)
return out
class SpeechToTextEngine:
def __init__(self, model_path, scorer_path):
self.model = Model(model_path)
self.model.enableExternalScorer(scorer_path)
def run(self, audio):
audio = normalize_audio(audio)
audio = BytesIO(audio)
with wave.Wave_read(audio) as wav:
audio = np.frombuffer(wav.readframes(wav.getnframes()), np.int16)
result = self.model.stt(audio)
return result
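# Minimal usage sketch (hypothetical file names; real paths come from the
# server configuration):
#
# engine = SpeechToTextEngine("model.tflite", "kenlm.scorer")
# with open("utterance.wav", "rb") as f:
#     transcript = engine.run(f.read())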
| 0 |
coqui_public_repos/inference-engine/third_party | coqui_public_repos/inference-engine/third_party/tensorflow/mfcc_mel_filterbank.cc | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This code resamples the FFT bins, and smooths then with triangle-shaped
// weights to create a mel-frequency filter bank. For filter i centered at f_i,
// there is a triangular weighting of the FFT bins that extends from
// filter f_i-1 (with a value of zero at the left edge of the triangle) to f_i
// (where the filter value is 1) to f_i+1 (where the filter values returns to
// zero).
// Note: this code fails if you ask for too many channels. The algorithm used
// here assumes that each FFT bin contributes to at most two channels: the
// right side of a triangle for channel i, and the left side of the triangle
// for channel i+1. If you ask for so many channels that some of the
// resulting mel triangle filters are smaller than a single FFT bin, these
// channels may end up with no contributing FFT bins. The resulting mel
// spectrum output will have some channels that are always zero.
#include "mfcc_mel_filterbank.h"
#include <math.h>
namespace tensorflow {
MfccMelFilterbank::MfccMelFilterbank() : initialized_(false) {}
bool MfccMelFilterbank::Initialize(int input_length, double input_sample_rate,
int output_channel_count,
double lower_frequency_limit,
double upper_frequency_limit) {
num_channels_ = output_channel_count;
sample_rate_ = input_sample_rate;
input_length_ = input_length;
if (num_channels_ < 1) {
// LOG(ERROR) << "Number of filterbank channels must be positive.";
return false;
}
if (sample_rate_ <= 0) {
// LOG(ERROR) << "Sample rate must be positive.";
return false;
}
if (input_length < 2) {
// LOG(ERROR) << "Input length must greater than 1.";
return false;
}
if (lower_frequency_limit < 0) {
// LOG(ERROR) << "Lower frequency limit must be nonnegative.";
return false;
}
if (upper_frequency_limit <= lower_frequency_limit) {
// LOG(ERROR) << "Upper frequency limit must be greater than "
// << "lower frequency limit.";
return false;
}
// An extra center frequency is computed at the top to get the upper
// limit on the high side of the final triangular filter.
center_frequencies_.resize(num_channels_ + 1);
const double mel_low = FreqToMel(lower_frequency_limit);
const double mel_hi = FreqToMel(upper_frequency_limit);
const double mel_span = mel_hi - mel_low;
const double mel_spacing = mel_span / static_cast<double>(num_channels_ + 1);
for (int i = 0; i < num_channels_ + 1; ++i) {
center_frequencies_[i] = mel_low + (mel_spacing * (i + 1));
}
// Always exclude DC; emulate HTK.
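// start_index_ works out to roughly the nearest bin to the lower limit plus
// one, which both rounds the boundary and skips bin 0 (DC): with
// lower_frequency_limit == 0 it evaluates to 1.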
const double hz_per_sbin =
0.5 * sample_rate_ / static_cast<double>(input_length_ - 1);
start_index_ = static_cast<int>(1.5 + (lower_frequency_limit / hz_per_sbin));
end_index_ = static_cast<int>(upper_frequency_limit / hz_per_sbin);
// Maps the input spectrum bin indices to filter bank channels/indices. For
// each FFT bin, band_mapper tells us which channel this bin contributes to
// on the right side of the triangle. Thus this bin also contributes to the
// left side of the next channel's triangle response.
band_mapper_.resize(input_length_);
int channel = 0;
for (int i = 0; i < input_length_; ++i) {
double melf = FreqToMel(i * hz_per_sbin);
if ((i < start_index_) || (i > end_index_)) {
band_mapper_[i] = -2; // Indicate an unused Fourier coefficient.
} else {
while ((channel < num_channels_) &&
(center_frequencies_[channel] < melf)) {
++channel;
}
band_mapper_[i] = channel - 1; // Can be == -1
}
}
// Create the weighting functions to taper the band edges. The contribution
// of any one FFT bin is based on its distance along the continuum between two
// mel-channel center frequencies. This bin contributes weights_[i] to the
// current channel and 1-weights_[i] to the next channel.
weights_.resize(input_length_);
for (int i = 0; i < input_length_; ++i) {
channel = band_mapper_[i];
if ((i < start_index_) || (i > end_index_)) {
weights_[i] = 0.0;
} else {
if (channel >= 0) {
weights_[i] =
(center_frequencies_[channel + 1] - FreqToMel(i * hz_per_sbin)) /
(center_frequencies_[channel + 1] - center_frequencies_[channel]);
} else {
weights_[i] = (center_frequencies_[0] - FreqToMel(i * hz_per_sbin)) /
(center_frequencies_[0] - mel_low);
}
}
}
// Check the sum of FFT bin weights for every mel band to identify
// situations where the mel bands are so narrow that they don't get
// significant weight on enough (or any) FFT bins -- i.e., too many
// mel bands have been requested for the given FFT size.
std::vector<int> bad_channels;
for (int c = 0; c < num_channels_; ++c) {
float band_weights_sum = 0.0;
for (int i = 0; i < input_length_; ++i) {
if (band_mapper_[i] == c - 1) {
band_weights_sum += (1.0 - weights_[i]);
} else if (band_mapper_[i] == c) {
band_weights_sum += weights_[i];
}
}
// The lowest mel channels have the fewest FFT bins and the lowest
// weights sum. But given that the target gain at the center frequency
// is 1.0, if the total sum of weights is 0.5, we're in bad shape.
if (band_weights_sum < 0.5) {
bad_channels.push_back(c);
}
}
if (!bad_channels.empty()) {
// LOG(ERROR) << "Missing " << bad_channels.size() << " bands "
// << " starting at " << bad_channels[0]
// << " in mel-frequency design. "
// << "Perhaps too many channels or "
// << "not enough frequency resolution in spectrum. ("
// << "input_length: " << input_length
// << " input_sample_rate: " << input_sample_rate
// << " output_channel_count: " << output_channel_count
// << " lower_frequency_limit: " << lower_frequency_limit
// << " upper_frequency_limit: " << upper_frequency_limit;
}
initialized_ = true;
return true;
}
// Compute the mel spectrum from the squared-magnitude FFT input by taking the
// square root, then summing FFT magnitudes under triangular integration windows
// whose widths increase with frequency.
void MfccMelFilterbank::Compute(const std::vector<double> &input,
std::vector<double> *output) const {
if (!initialized_) {
// LOG(ERROR) << "Mel Filterbank not initialized.";
return;
}
if (input.size() <= end_index_) {
// LOG(ERROR) << "Input too short to compute filterbank";
return;
}
// Ensure output is right length and reset all values.
output->assign(num_channels_, 0.0);
for (int i = start_index_; i <= end_index_; i++) { // For each FFT bin
double spec_val = sqrt(input[i]);
double weighted = spec_val * weights_[i];
int channel = band_mapper_[i];
if (channel >= 0)
(*output)[channel] += weighted; // Right side of triangle, downward slope
channel++;
if (channel < num_channels_)
(*output)[channel] += spec_val - weighted; // Left side of triangle
}
}
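// HTK-style Hz-to-mel conversion: mel(f) = 1127 * ln(1 + f / 700).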
double MfccMelFilterbank::FreqToMel(double freq) const {
return 1127.0 * log1p(freq / 700.0);
}
} // namespace tensorflow
| 0 |
coqui_public_repos/TTS/tests | coqui_public_repos/TTS/tests/data_tests/test_samplers.py | import functools
import random
import unittest
import torch
from TTS.config.shared_configs import BaseDatasetConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.utils.data import get_length_balancer_weights
from TTS.tts.utils.languages import get_language_balancer_weights
from TTS.tts.utils.speakers import get_speaker_balancer_weights
from TTS.utils.samplers import BucketBatchSampler, PerfectBatchSampler
# Fixing random state to avoid random fails
torch.manual_seed(0)
dataset_config_en = BaseDatasetConfig(
formatter="ljspeech",
meta_file_train="metadata.csv",
meta_file_val="metadata.csv",
path="tests/data/ljspeech",
language="en",
)
dataset_config_pt = BaseDatasetConfig(
formatter="ljspeech",
meta_file_train="metadata.csv",
meta_file_val="metadata.csv",
path="tests/data/ljspeech",
language="pt-br",
)
# Adding the EN samples twice to create a language unbalanced dataset
train_samples, eval_samples = load_tts_samples(
[dataset_config_en, dataset_config_en, dataset_config_pt], eval_split=True
)
# generate a speaker-unbalanced dataset
for i, sample in enumerate(train_samples):
if i < 5:
sample["speaker_name"] = "ljspeech-0"
else:
sample["speaker_name"] = "ljspeech-1"
def is_balanced(lang_1, lang_2):
return 0.85 < lang_1 / lang_2 < 1.2
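# is_balanced treats count ratios within roughly 0.85-1.2 of 1:1 as balanced,
# which keeps the randomized assertions below tolerant of sampling noise.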
class TestSamplers(unittest.TestCase):
def test_language_random_sampler(self): # pylint: disable=no-self-use
random_sampler = torch.utils.data.RandomSampler(train_samples)
ids = functools.reduce(lambda a, b: a + b, [list(random_sampler) for i in range(100)])
en, pt = 0, 0
for index in ids:
if train_samples[index]["language"] == "en":
en += 1
else:
pt += 1
assert not is_balanced(en, pt), "Random sampler is supposed to be unbalanced"
def test_language_weighted_random_sampler(self): # pylint: disable=no-self-use
weighted_sampler = torch.utils.data.sampler.WeightedRandomSampler(
get_language_balancer_weights(train_samples), len(train_samples)
)
ids = functools.reduce(lambda a, b: a + b, [list(weighted_sampler) for i in range(100)])
en, pt = 0, 0
for index in ids:
if train_samples[index]["language"] == "en":
en += 1
else:
pt += 1
assert is_balanced(en, pt), "Language Weighted sampler is supposed to be balanced"
def test_speaker_weighted_random_sampler(self): # pylint: disable=no-self-use
weighted_sampler = torch.utils.data.sampler.WeightedRandomSampler(
get_speaker_balancer_weights(train_samples), len(train_samples)
)
ids = functools.reduce(lambda a, b: a + b, [list(weighted_sampler) for i in range(100)])
spk1, spk2 = 0, 0
for index in ids:
if train_samples[index]["speaker_name"] == "ljspeech-0":
spk1 += 1
else:
spk2 += 1
assert is_balanced(spk1, spk2), "Speaker Weighted sampler is supposed to be balanced"
def test_perfect_sampler(self): # pylint: disable=no-self-use
classes = set()
for item in train_samples:
classes.add(item["speaker_name"])
sampler = PerfectBatchSampler(
train_samples,
classes,
batch_size=2 * 3, # total batch size
num_classes_in_batch=2,
label_key="speaker_name",
shuffle=False,
drop_last=True,
)
batchs = functools.reduce(lambda a, b: a + b, [list(sampler) for i in range(100)])
for batch in batchs:
spk1, spk2 = 0, 0
# for each index in the batch
for index in batch:
if train_samples[index]["speaker_name"] == "ljspeech-0":
spk1 += 1
else:
spk2 += 1
assert spk1 == spk2, "PerfectBatchSampler is supposed to be perfectly balanced"
def test_perfect_sampler_shuffle(self): # pylint: disable=no-self-use
classes = set()
for item in train_samples:
classes.add(item["speaker_name"])
sampler = PerfectBatchSampler(
train_samples,
classes,
batch_size=2 * 3, # total batch size
num_classes_in_batch=2,
label_key="speaker_name",
shuffle=True,
drop_last=False,
)
batchs = functools.reduce(lambda a, b: a + b, [list(sampler) for i in range(100)])
for batch in batchs:
spk1, spk2 = 0, 0
# for each index in the batch
for index in batch:
if train_samples[index]["speaker_name"] == "ljspeech-0":
spk1 += 1
else:
spk2 += 1
assert spk1 == spk2, "PerfectBatchSampler is supposed to be perfectly balanced"
def test_length_weighted_random_sampler(self): # pylint: disable=no-self-use
for _ in range(1000):
# generate a length-unbalanced dataset with random max/min audio length
min_audio = random.randrange(1, 22050)
max_audio = random.randrange(44100, 220500)
for idx, item in enumerate(train_samples):
# increase the diversity of durations
random_increase = random.randrange(100, 1000)
if idx < 5:
item["audio_length"] = min_audio + random_increase
else:
item["audio_length"] = max_audio + random_increase
weighted_sampler = torch.utils.data.sampler.WeightedRandomSampler(
get_length_balancer_weights(train_samples, num_buckets=2), len(train_samples)
)
ids = functools.reduce(lambda a, b: a + b, [list(weighted_sampler) for i in range(100)])
len1, len2 = 0, 0
for index in ids:
if train_samples[index]["audio_length"] < max_audio:
len1 += 1
else:
len2 += 1
assert is_balanced(len1, len2), "Length Weighted sampler is supposed to be balanced"
def test_bucket_batch_sampler(self):
bucket_size_multiplier = 2
sampler = range(len(train_samples))
sampler = BucketBatchSampler(
sampler,
data=train_samples,
batch_size=7,
drop_last=True,
sort_key=lambda x: len(x["text"]),
bucket_size_multiplier=bucket_size_multiplier,
)
        # check if the samples are sorted by text length while bucketing
min_text_len_in_bucket = 0
bucket_items = []
for batch_idx, batch in enumerate(list(sampler)):
if (batch_idx + 1) % bucket_size_multiplier == 0:
for bucket_item in bucket_items:
self.assertLessEqual(min_text_len_in_bucket, len(train_samples[bucket_item]["text"]))
min_text_len_in_bucket = len(train_samples[bucket_item]["text"])
min_text_len_in_bucket = 0
bucket_items = []
else:
bucket_items += batch
# check sampler length
self.assertEqual(len(sampler), len(train_samples) // 7)
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/bin/fstencode-main.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Encode transducer labels and/or weights.
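//
// Typical invocations, based on the flags declared below (the codex file
// stores the encoding mapping):
//   fstencode --encode_labels in.fst codex out.fst   # encode, writing codex
//   fstencode --decode out.fst codex in.fst          # decode using codex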
#include <cstring>
#include <memory>
#include <string>
#include <fst/flags.h>
#include <fst/script/decode.h>
#include <fst/script/encode.h>
#include <fst/script/getters.h>
DECLARE_bool(encode_labels);
DECLARE_bool(encode_weights);
DECLARE_bool(encode_reuse);
DECLARE_bool(decode);
int fstencode_main(int argc, char **argv) {
namespace s = fst::script;
using fst::script::FstClass;
using fst::script::MutableFstClass;
string usage = "Encodes transducer labels and/or weights.\n\n Usage: ";
usage += argv[0];
usage += " in.fst codex [out.fst]\n";
std::set_new_handler(FailedNewHandler);
SET_FLAGS(usage.c_str(), &argc, &argv, true);
if (argc < 3 || argc > 4) {
ShowUsage();
return 1;
}
const string in_name = (strcmp(argv[1], "-") != 0) ? argv[1] : "";
const string codex_name = argv[2];
const string out_name = argc > 3 ? argv[3] : "";
std::unique_ptr<MutableFstClass> fst(MutableFstClass::Read(in_name, true));
if (!fst) return 1;
if (FLAGS_decode) {
s::Decode(fst.get(), codex_name);
return !fst->Write(out_name);
} else {
const auto flags =
s::GetEncodeFlags(FLAGS_encode_labels, FLAGS_encode_weights);
s::Encode(fst.get(), flags, FLAGS_encode_reuse, codex_name);
return !fst->Write(out_name);
}
}
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/string-weight.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// String weight set and associated semiring operation definitions.
#ifndef FST_STRING_WEIGHT_H_
#define FST_STRING_WEIGHT_H_
#include <cstdlib>
#include <list>
#include <string>
#include <vector>
#include <fst/product-weight.h>
#include <fst/union-weight.h>
#include <fst/weight.h>
namespace fst {
constexpr int kStringInfinity = -1; // Label for the infinite string.
constexpr int kStringBad = -2; // Label for a non-string.
constexpr char kStringSeparator = '_'; // Label separator in strings.
// Determines whether to use left or right string semiring. Includes a
// 'restricted' version that signals an error if proper prefixes/suffixes
// would otherwise be returned by Plus, useful with various
// algorithms that require functional transducer input with the
// string semirings.
enum StringType { STRING_LEFT = 0, STRING_RIGHT = 1, STRING_RESTRICT = 2 };
constexpr StringType ReverseStringType(StringType s) {
return s == STRING_LEFT ? STRING_RIGHT
: (s == STRING_RIGHT ? STRING_LEFT : STRING_RESTRICT);
}
template <class>
class StringWeightIterator;
template <class>
class StringWeightReverseIterator;
// String semiring: (longest_common_prefix/suffix, ., Infinity, Epsilon)
template <typename Label_, StringType S = STRING_LEFT>
class StringWeight {
public:
using Label = Label_;
using ReverseWeight = StringWeight<Label, ReverseStringType(S)>;
using Iterator = StringWeightIterator<StringWeight>;
using ReverseIterator = StringWeightReverseIterator<StringWeight>;
friend class StringWeightIterator<StringWeight>;
friend class StringWeightReverseIterator<StringWeight>;
StringWeight() {}
template <typename Iterator>
StringWeight(const Iterator &begin, const Iterator &end) {
for (auto iter = begin; iter != end; ++iter) PushBack(*iter);
}
explicit StringWeight(Label label) { PushBack(label); }
static const StringWeight &Zero() {
static const auto *const zero = new StringWeight(Label(kStringInfinity));
return *zero;
}
static const StringWeight &One() {
static const auto *const one = new StringWeight();
return *one;
}
static const StringWeight &NoWeight() {
static const auto *const no_weight = new StringWeight(Label(kStringBad));
return *no_weight;
}
static const string &Type() {
static const string *const type = new string(
S == STRING_LEFT
? "left_string"
: (S == STRING_RIGHT ? "right_string" : "restricted_string"));
return *type;
}
bool Member() const;
std::istream &Read(std::istream &strm);
std::ostream &Write(std::ostream &strm) const;
size_t Hash() const;
StringWeight Quantize(float delta = kDelta) const { return *this; }
ReverseWeight Reverse() const;
static constexpr uint64_t Properties() {
return kIdempotent |
(S == STRING_LEFT ? kLeftSemiring
: (S == STRING_RIGHT
? kRightSemiring
: /* S == STRING_RESTRICT */ kLeftSemiring |
kRightSemiring));
}
// These operations combined with the StringWeightIterator and
// StringWeightReverseIterator provide the access and mutation of the string
// internal elements.
// Clear existing StringWeight.
void Clear() {
first_ = 0;
rest_.clear();
}
size_t Size() const { return first_ ? rest_.size() + 1 : 0; }
void PushFront(Label label) {
if (first_) rest_.push_front(first_);
first_ = label;
}
void PushBack(Label label) {
if (!first_) {
first_ = label;
} else {
rest_.push_back(label);
}
}
private:
Label first_ = 0; // First label in string (0 if empty).
std::list<Label> rest_; // Remaining labels in string.
};
// Traverses string in forward direction.
template <class StringWeight_>
class StringWeightIterator {
public:
using Weight = StringWeight_;
using Label = typename Weight::Label;
explicit StringWeightIterator(const Weight &w)
: first_(w.first_), rest_(w.rest_), init_(true), iter_(rest_.begin()) {}
bool Done() const {
if (init_) {
return first_ == 0;
} else {
return iter_ == rest_.end();
}
}
const Label &Value() const { return init_ ? first_ : *iter_; }
void Next() {
if (init_) {
init_ = false;
} else {
++iter_;
}
}
void Reset() {
init_ = true;
iter_ = rest_.begin();
}
private:
const Label &first_;
const decltype(Weight::rest_) &rest_;
bool init_; // In the initialized state?
typename std::remove_reference<decltype (Weight::rest_)>::type::const_iterator iter_;
};
// Traverses string in backward direction.
template <class StringWeight_>
class StringWeightReverseIterator {
public:
using Weight = StringWeight_;
using Label = typename Weight::Label;
explicit StringWeightReverseIterator(const Weight &w)
: first_(w.first_),
rest_(w.rest_),
fin_(first_ == Label()),
iter_(rest_.rbegin()) {}
bool Done() const { return fin_; }
const Label &Value() const { return iter_ == rest_.rend() ? first_ : *iter_; }
void Next() {
if (iter_ == rest_.rend()) {
fin_ = true;
} else {
++iter_;
}
}
void Reset() {
fin_ = false;
iter_ = rest_.rbegin();
}
private:
const Label &first_;
const decltype(Weight::rest_) &rest_;
bool fin_; // In the final state?
typename std::remove_reference<decltype (Weight::rest_)>::type::const_reverse_iterator iter_;
};
// StringWeight member functions follow that require
// StringWeightIterator or StringWeightReverseIterator.
template <typename Label, StringType S>
inline std::istream &StringWeight<Label, S>::Read(std::istream &strm) {
Clear();
int32_t size;
ReadType(strm, &size);
for (int32_t i = 0; i < size; ++i) {
Label label;
ReadType(strm, &label);
PushBack(label);
}
return strm;
}
template <typename Label, StringType S>
inline std::ostream &StringWeight<Label, S>::Write(std::ostream &strm) const {
const int32_t size = Size();
WriteType(strm, size);
for (Iterator iter(*this); !iter.Done(); iter.Next()) {
WriteType(strm, iter.Value());
}
return strm;
}
template <typename Label, StringType S>
inline bool StringWeight<Label, S>::Member() const {
Iterator iter(*this);
return iter.Value() != Label(kStringBad);
}
template <typename Label, StringType S>
inline typename StringWeight<Label, S>::ReverseWeight
StringWeight<Label, S>::Reverse() const {
ReverseWeight rweight;
for (Iterator iter(*this); !iter.Done(); iter.Next()) {
rweight.PushFront(iter.Value());
}
return rweight;
}
template <typename Label, StringType S>
inline size_t StringWeight<Label, S>::Hash() const {
size_t h = 0;
for (Iterator iter(*this); !iter.Done(); iter.Next()) {
h ^= h << 1 ^ iter.Value();
}
return h;
}
template <typename Label, StringType S>
inline bool operator==(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2) {
if (w1.Size() != w2.Size()) return false;
using Iterator = typename StringWeight<Label, S>::Iterator;
Iterator iter1(w1);
Iterator iter2(w2);
for (; !iter1.Done(); iter1.Next(), iter2.Next()) {
if (iter1.Value() != iter2.Value()) return false;
}
return true;
}
template <typename Label, StringType S>
inline bool operator!=(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2) {
return !(w1 == w2);
}
template <typename Label, StringType S>
inline bool ApproxEqual(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2,
float delta = kDelta) {
return w1 == w2;
}
template <typename Label, StringType S>
inline std::ostream &operator<<(std::ostream &strm,
const StringWeight<Label, S> &weight) {
typename StringWeight<Label, S>::Iterator iter(weight);
if (iter.Done()) {
return strm << "Epsilon";
} else if (iter.Value() == Label(kStringInfinity)) {
return strm << "Infinity";
} else if (iter.Value() == Label(kStringBad)) {
return strm << "BadString";
} else {
for (size_t i = 0; !iter.Done(); ++i, iter.Next()) {
if (i > 0) strm << kStringSeparator;
strm << iter.Value();
}
}
return strm;
}
template <typename Label, StringType S>
inline std::istream &operator>>(std::istream &strm,
StringWeight<Label, S> &weight) {
string str;
strm >> str;
using Weight = StringWeight<Label, S>;
if (str == "Infinity") {
weight = Weight::Zero();
} else if (str == "Epsilon") {
weight = Weight::One();
} else {
weight.Clear();
char *p = nullptr;
for (const char *cs = str.c_str(); !p || *p != '\0'; cs = p + 1) {
const Label label = strtoll(cs, &p, 10);
if (p == cs || (*p != 0 && *p != kStringSeparator)) {
strm.clear(std::ios::badbit);
break;
}
weight.PushBack(label);
}
}
return strm;
}
// Default is for the restricted string semiring. String equality is required
// (for non-Zero() input). The restriction is used (e.g., in determinization)
// to ensure the input is functional.
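// For example, Plus(1_2, 1_2) = 1_2, while Plus(1_2, 1_3) signals an error
// and returns NoWeight().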
template <typename Label, StringType S>
inline StringWeight<Label, S> Plus(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2) {
using Weight = StringWeight<Label, S>;
if (!w1.Member() || !w2.Member()) return Weight::NoWeight();
if (w1 == Weight::Zero()) return w2;
if (w2 == Weight::Zero()) return w1;
if (w1 != w2) {
FSTERROR() << "StringWeight::Plus: Unequal arguments "
<< "(non-functional FST?)"
<< " w1 = " << w1 << " w2 = " << w2;
return Weight::NoWeight();
}
return w1;
}
// Longest common prefix for left string semiring.
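// For example, Plus(1_2_3, 1_2_4) = 1_2.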
template <typename Label>
inline StringWeight<Label, STRING_LEFT> Plus(
const StringWeight<Label, STRING_LEFT> &w1,
const StringWeight<Label, STRING_LEFT> &w2) {
using Weight = StringWeight<Label, STRING_LEFT>;
if (!w1.Member() || !w2.Member()) return Weight::NoWeight();
if (w1 == Weight::Zero()) return w2;
if (w2 == Weight::Zero()) return w1;
Weight sum;
typename Weight::Iterator iter1(w1);
typename Weight::Iterator iter2(w2);
for (; !iter1.Done() && !iter2.Done() && iter1.Value() == iter2.Value();
iter1.Next(), iter2.Next()) {
sum.PushBack(iter1.Value());
}
return sum;
}
// Longest common suffix for right string semiring.
template <typename Label>
inline StringWeight<Label, STRING_RIGHT> Plus(
const StringWeight<Label, STRING_RIGHT> &w1,
const StringWeight<Label, STRING_RIGHT> &w2) {
using Weight = StringWeight<Label, STRING_RIGHT>;
if (!w1.Member() || !w2.Member()) return Weight::NoWeight();
if (w1 == Weight::Zero()) return w2;
if (w2 == Weight::Zero()) return w1;
Weight sum;
typename Weight::ReverseIterator iter1(w1);
typename Weight::ReverseIterator iter2(w2);
for (; !iter1.Done() && !iter2.Done() && iter1.Value() == iter2.Value();
iter1.Next(), iter2.Next()) {
sum.PushFront(iter1.Value());
}
return sum;
}
template <typename Label, StringType S>
inline StringWeight<Label, S> Times(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2) {
using Weight = StringWeight<Label, S>;
if (!w1.Member() || !w2.Member()) return Weight::NoWeight();
if (w1 == Weight::Zero() || w2 == Weight::Zero()) return Weight::Zero();
Weight product(w1);
for (typename Weight::Iterator iter(w2); !iter.Done(); iter.Next()) {
product.PushBack(iter.Value());
}
return product;
}
// Left division in a left string semiring.
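// Removes a length-|w2| prefix from w1, e.g., DivideLeft(1_2_3, 1_2) = 3.
// The caller is responsible for w2 actually being a prefix of w1; no check
// is performed here.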
template <typename Label, StringType S>
inline StringWeight<Label, S> DivideLeft(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2) {
using Weight = StringWeight<Label, S>;
if (!w1.Member() || !w2.Member()) return Weight::NoWeight();
if (w2 == Weight::Zero()) {
return Weight(Label(kStringBad));
} else if (w1 == Weight::Zero()) {
return Weight::Zero();
}
Weight result;
typename Weight::Iterator iter(w1);
size_t i = 0;
for (; !iter.Done() && i < w2.Size(); iter.Next(), ++i) {
}
for (; !iter.Done(); iter.Next()) result.PushBack(iter.Value());
return result;
}
// Right division in a right string semiring.
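// Removes a length-|w2| suffix from w1, e.g., DivideRight(1_2_3, 2_3) = 1.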
template <typename Label, StringType S>
inline StringWeight<Label, S> DivideRight(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2) {
using Weight = StringWeight<Label, S>;
if (!w1.Member() || !w2.Member()) return Weight::NoWeight();
if (w2 == Weight::Zero()) {
return Weight(Label(kStringBad));
} else if (w1 == Weight::Zero()) {
return Weight::Zero();
}
Weight result;
typename Weight::ReverseIterator iter(w1);
size_t i = 0;
for (; !iter.Done() && i < w2.Size(); iter.Next(), ++i) {
}
for (; !iter.Done(); iter.Next()) result.PushFront(iter.Value());
return result;
}
// Default is the restricted string semiring.
template <typename Label, StringType S>
inline StringWeight<Label, S> Divide(const StringWeight<Label, S> &w1,
const StringWeight<Label, S> &w2,
DivideType divide_type) {
using Weight = StringWeight<Label, S>;
if (divide_type == DIVIDE_LEFT) {
return DivideLeft(w1, w2);
} else if (divide_type == DIVIDE_RIGHT) {
return DivideRight(w1, w2);
} else {
FSTERROR() << "StringWeight::Divide: "
<< "Only explicit left or right division is defined "
<< "for the " << Weight::Type() << " semiring";
return Weight::NoWeight();
}
}
// Left division in the left string semiring.
template <typename Label>
inline StringWeight<Label, STRING_LEFT> Divide(
const StringWeight<Label, STRING_LEFT> &w1,
const StringWeight<Label, STRING_LEFT> &w2, DivideType divide_type) {
if (divide_type != DIVIDE_LEFT) {
FSTERROR() << "StringWeight::Divide: Only left division is defined "
<< "for the left string semiring";
return StringWeight<Label, STRING_LEFT>::NoWeight();
}
return DivideLeft(w1, w2);
}
// Right division in the right string semiring.
template <typename Label>
inline StringWeight<Label, STRING_RIGHT> Divide(
const StringWeight<Label, STRING_RIGHT> &w1,
const StringWeight<Label, STRING_RIGHT> &w2, DivideType divide_type) {
if (divide_type != DIVIDE_RIGHT) {
FSTERROR() << "StringWeight::Divide: Only right division is defined "
<< "for the right string semiring";
return StringWeight<Label, STRING_RIGHT>::NoWeight();
}
return DivideRight(w1, w2);
}
// This function object generates StringWeights that are random integer strings
// from {1, ..., alphabet_size}^{0, ..., max_string_length} U { Zero }. This is
// intended primarily for testing.
template <class Label, StringType S>
class WeightGenerate<StringWeight<Label, S>> {
public:
using Weight = StringWeight<Label, S>;
explicit WeightGenerate(bool allow_zero = true,
size_t alphabet_size = kNumRandomWeights,
size_t max_string_length = kNumRandomWeights)
: allow_zero_(allow_zero),
alphabet_size_(alphabet_size),
max_string_length_(max_string_length) {}
Weight operator()() const {
size_t n = rand() % (max_string_length_ + allow_zero_); // NOLINT
if (allow_zero_ && n == max_string_length_) return Weight::Zero();
std::vector<Label> labels;
labels.reserve(n);
for (size_t i = 0; i < n; ++i) {
labels.push_back(rand() % alphabet_size_ + 1); // NOLINT
}
return Weight(labels.begin(), labels.end());
}
private:
// Permits Zero() and zero divisors.
const bool allow_zero_;
// Alphabet size for random weights.
const size_t alphabet_size_;
  // Maximum length of a random string.
const size_t max_string_length_;
};
// Determines whether to use left, right, or (general) gallic semiring. Includes
// a restricted version that signals an error if proper string prefixes or
// suffixes would otherwise be returned by string Plus. This is useful with
// algorithms that require functional transducer input. Also includes min
// version that changes the Plus to keep only the lowest W weight string.
enum GallicType {
GALLIC_LEFT = 0,
GALLIC_RIGHT = 1,
GALLIC_RESTRICT = 2,
GALLIC_MIN = 3,
GALLIC = 4
};
constexpr StringType GallicStringType(GallicType g) {
return g == GALLIC_LEFT
? STRING_LEFT
: (g == GALLIC_RIGHT ? STRING_RIGHT : STRING_RESTRICT);
}
constexpr GallicType ReverseGallicType(GallicType g) {
return g == GALLIC_LEFT
? GALLIC_RIGHT
: (g == GALLIC_RIGHT
? GALLIC_LEFT
: (g == GALLIC_RESTRICT
? GALLIC_RESTRICT
: (g == GALLIC_MIN ? GALLIC_MIN : GALLIC)));
}
// Product of a string weight and an arbitrary weight.
template <class Label, class W, GallicType G = GALLIC_LEFT>
struct GallicWeight
: public ProductWeight<StringWeight<Label, GallicStringType(G)>, W> {
using ReverseWeight =
GallicWeight<Label, typename W::ReverseWeight, ReverseGallicType(G)>;
using SW = StringWeight<Label, GallicStringType(G)>;
using ProductWeight<SW, W>::Properties;
GallicWeight() {}
GallicWeight(SW w1, W w2) : ProductWeight<SW, W>(w1, w2) {}
explicit GallicWeight(const string &s, int *nread = nullptr)
: ProductWeight<SW, W>(s, nread) {}
explicit GallicWeight(const ProductWeight<SW, W> &w)
: ProductWeight<SW, W>(w) {}
static const GallicWeight &Zero() {
static const GallicWeight zero(ProductWeight<SW, W>::Zero());
return zero;
}
static const GallicWeight &One() {
static const GallicWeight one(ProductWeight<SW, W>::One());
return one;
}
static const GallicWeight &NoWeight() {
static const GallicWeight no_weight(ProductWeight<SW, W>::NoWeight());
return no_weight;
}
static const string &Type() {
static const string *const type = new string(
G == GALLIC_LEFT
? "left_gallic"
: (G == GALLIC_RIGHT
? "right_gallic"
: (G == GALLIC_RESTRICT
? "restricted_gallic"
: (G == GALLIC_MIN ? "min_gallic" : "gallic"))));
return *type;
}
GallicWeight Quantize(float delta = kDelta) const {
return GallicWeight(ProductWeight<SW, W>::Quantize(delta));
}
ReverseWeight Reverse() const {
return ReverseWeight(ProductWeight<SW, W>::Reverse());
}
};
// Default plus.
template <class Label, class W, GallicType G>
inline GallicWeight<Label, W, G> Plus(const GallicWeight<Label, W, G> &w,
const GallicWeight<Label, W, G> &v) {
return GallicWeight<Label, W, G>(Plus(w.Value1(), v.Value1()),
Plus(w.Value2(), v.Value2()));
}
// Min gallic plus.
template <class Label, class W>
inline GallicWeight<Label, W, GALLIC_MIN> Plus(
const GallicWeight<Label, W, GALLIC_MIN> &w1,
const GallicWeight<Label, W, GALLIC_MIN> &w2) {
static const NaturalLess<W> less;
return less(w1.Value2(), w2.Value2()) ? w1 : w2;
}
template <class Label, class W, GallicType G>
inline GallicWeight<Label, W, G> Times(const GallicWeight<Label, W, G> &w,
const GallicWeight<Label, W, G> &v) {
return GallicWeight<Label, W, G>(Times(w.Value1(), v.Value1()),
Times(w.Value2(), v.Value2()));
}
template <class Label, class W, GallicType G>
inline GallicWeight<Label, W, G> Divide(const GallicWeight<Label, W, G> &w,
const GallicWeight<Label, W, G> &v,
DivideType divide_type = DIVIDE_ANY) {
return GallicWeight<Label, W, G>(Divide(w.Value1(), v.Value1(), divide_type),
Divide(w.Value2(), v.Value2(), divide_type));
}
// This function object generates gallic weights by calling an underlying
// product weight generator. This is intended primarily for testing.
template <class Label, class W, GallicType G>
class WeightGenerate<GallicWeight<Label, W, G>>
: public WeightGenerate<
ProductWeight<StringWeight<Label, GallicStringType(G)>, W>> {
public:
using Weight = GallicWeight<Label, W, G>;
using Generate = WeightGenerate<
ProductWeight<StringWeight<Label, GallicStringType(G)>, W>>;
explicit WeightGenerate(bool allow_zero = true) : generate_(allow_zero) {}
Weight operator()() const { return Weight(generate_()); }
private:
const Generate generate_;
};
// Union weight options for (general) GALLIC type.
template <class Label, class W>
struct GallicUnionWeightOptions {
using ReverseOptions = GallicUnionWeightOptions<Label, W>;
using GW = GallicWeight<Label, W, GALLIC_RESTRICT>;
using SW = StringWeight<Label, GallicStringType(GALLIC_RESTRICT)>;
using SI = StringWeightIterator<SW>;
  // Military (shortlex) order: shorter strings first, ties broken lexicographically.
struct Compare {
bool operator()(const GW &w1, const GW &w2) const {
const SW &s1 = w1.Value1();
const SW &s2 = w2.Value1();
if (s1.Size() < s2.Size()) return true;
if (s1.Size() > s2.Size()) return false;
SI iter1(s1);
SI iter2(s2);
while (!iter1.Done()) {
const auto l1 = iter1.Value();
const auto l2 = iter2.Value();
if (l1 < l2) return true;
if (l1 > l2) return false;
iter1.Next();
iter2.Next();
}
return false;
}
};
  // Adds the W weights when the string parts are equal.
struct Merge {
GW operator()(const GW &w1, const GW &w2) const {
return GW(w1.Value1(), Plus(w1.Value2(), w2.Value2()));
}
};
};
// Specialization for the (general) GALLIC type.
template <class Label, class W>
struct GallicWeight<Label, W, GALLIC>
: public UnionWeight<GallicWeight<Label, W, GALLIC_RESTRICT>,
GallicUnionWeightOptions<Label, W>> {
using GW = GallicWeight<Label, W, GALLIC_RESTRICT>;
using SW = StringWeight<Label, GallicStringType(GALLIC_RESTRICT)>;
using SI = StringWeightIterator<SW>;
using UW = UnionWeight<GW, GallicUnionWeightOptions<Label, W>>;
using UI = UnionWeightIterator<GW, GallicUnionWeightOptions<Label, W>>;
using ReverseWeight = GallicWeight<Label, W, GALLIC>;
using UW::Properties;
GallicWeight() {}
  // Implicit conversion from the underlying union weight.
GallicWeight(const UW &weight) : UW(weight) {} // NOLINT
// Singleton constructors: create a GALLIC weight containing a single
// GALLIC_RESTRICT weight. Takes as argument (1) a GALLIC_RESTRICT weight or
// (2) the two components of a GALLIC_RESTRICT weight.
explicit GallicWeight(const GW &weight) : UW(weight) {}
GallicWeight(SW w1, W w2) : UW(GW(w1, w2)) {}
explicit GallicWeight(const string &str, int *nread = nullptr)
: UW(str, nread) {}
static const GallicWeight<Label, W, GALLIC> &Zero() {
static const GallicWeight<Label, W, GALLIC> zero(UW::Zero());
return zero;
}
static const GallicWeight<Label, W, GALLIC> &One() {
static const GallicWeight<Label, W, GALLIC> one(UW::One());
return one;
}
static const GallicWeight<Label, W, GALLIC> &NoWeight() {
static const GallicWeight<Label, W, GALLIC> no_weight(UW::NoWeight());
return no_weight;
}
static const string &Type() {
static const string *const type = new string("gallic");
return *type;
}
GallicWeight<Label, W, GALLIC> Quantize(float delta = kDelta) const {
return UW::Quantize(delta);
}
ReverseWeight Reverse() const { return UW::Reverse(); }
};
// (General) gallic plus.
template <class Label, class W>
inline GallicWeight<Label, W, GALLIC> Plus(
const GallicWeight<Label, W, GALLIC> &w1,
const GallicWeight<Label, W, GALLIC> &w2) {
using GW = GallicWeight<Label, W, GALLIC_RESTRICT>;
using UW = UnionWeight<GW, GallicUnionWeightOptions<Label, W>>;
return Plus(static_cast<UW>(w1), static_cast<UW>(w2));
}
// (General) gallic times.
template <class Label, class W>
inline GallicWeight<Label, W, GALLIC> Times(
const GallicWeight<Label, W, GALLIC> &w1,
const GallicWeight<Label, W, GALLIC> &w2) {
using GW = GallicWeight<Label, W, GALLIC_RESTRICT>;
using UW = UnionWeight<GW, GallicUnionWeightOptions<Label, W>>;
return Times(static_cast<UW>(w1), static_cast<UW>(w2));
}
// (General) gallic divide.
template <class Label, class W>
inline GallicWeight<Label, W, GALLIC> Divide(
const GallicWeight<Label, W, GALLIC> &w1,
const GallicWeight<Label, W, GALLIC> &w2,
DivideType divide_type = DIVIDE_ANY) {
using GW = GallicWeight<Label, W, GALLIC_RESTRICT>;
using UW = UnionWeight<GW, GallicUnionWeightOptions<Label, W>>;
return Divide(static_cast<UW>(w1), static_cast<UW>(w2), divide_type);
}
// This function object generates gallic weights by calling an underlying
// union weight generator. This is intended primarily for testing.
template <class Label, class W>
class WeightGenerate<GallicWeight<Label, W, GALLIC>>
: public WeightGenerate<UnionWeight<GallicWeight<Label, W, GALLIC_RESTRICT>,
GallicUnionWeightOptions<Label, W>>> {
public:
using Weight = GallicWeight<Label, W, GALLIC>;
using Generate =
WeightGenerate<UnionWeight<GallicWeight<Label, W, GALLIC_RESTRICT>,
GallicUnionWeightOptions<Label, W>>>;
explicit WeightGenerate(bool allow_zero = true) : generate_(allow_zero) {}
Weight operator()() const { return Weight(generate_()); }
private:
const Generate generate_;
};
} // namespace fst
#endif // FST_STRING_WEIGHT_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/python/basictypes.pxd | # See www.openfst.org for extensive documentation on this weighted
# finite-state transducer library.
from libc.stdint cimport *
cdef extern from "<fst/types.h>" nogil:
ctypedef int8_t int8
ctypedef int16_t int16
ctypedef int32_t int32
ctypedef int64_t int64
ctypedef uint8_t uint8
ctypedef uint16_t uint16
ctypedef uint32_t uint32
ctypedef uint64_t uint64
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/compile.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <istream>
#include <string>
#include <fst/script/compile.h>
#include <fst/script/fst-class.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
void CompileFst(std::istream &istrm, const string &source, const string &dest,
const string &fst_type, const string &arc_type,
const SymbolTable *isyms, const SymbolTable *osyms,
const SymbolTable *ssyms, bool accep, bool ikeep, bool okeep,
bool nkeep, bool allow_negative_labels) {
std::unique_ptr<FstClass> fst(
CompileFstInternal(istrm, source, fst_type, arc_type, isyms, osyms, ssyms,
accep, ikeep, okeep, nkeep, allow_negative_labels));
fst->Write(dest);
}
FstClass *CompileFstInternal(std::istream &istrm, const string &source,
const string &fst_type, const string &arc_type,
const SymbolTable *isyms, const SymbolTable *osyms,
const SymbolTable *ssyms, bool accep, bool ikeep,
bool okeep, bool nkeep,
bool allow_negative_labels) {
CompileFstInnerArgs iargs(istrm, source, fst_type, isyms, osyms, ssyms, accep,
ikeep, okeep, nkeep, allow_negative_labels);
CompileFstArgs args(iargs);
Apply<Operation<CompileFstArgs>>("CompileFstInternal", arc_type, &args);
return args.retval;
}
// This registers (2) CompileFstInternal; (1) CompileFst does not require registration.
REGISTER_FST_OPERATION(CompileFstInternal, StdArc, CompileFstArgs);
REGISTER_FST_OPERATION(CompileFstInternal, LogArc, CompileFstArgs);
REGISTER_FST_OPERATION(CompileFstInternal, Log64Arc, CompileFstArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/Trainer | coqui_public_repos/Trainer/trainer/analytics.py | import os
import requests
telemetry = os.environ.get("TRAINER_TELEMETRY")
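# Setting the environment variable TRAINER_TELEMETRY=0 disables the ping below.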
def ping_training_run():
if telemetry == "0":
return
URL = "https://coqui.gateway.scarf.sh/trainer/training_run"
_ = requests.get(URL, timeout=5)
| 0 |
coqui_public_repos/snakepit/src | coqui_public_repos/snakepit/src/routes/nodes.js | const Router = require('express-promise-router')
const Parallel = require('async-parallel')
const fs = require('fs-extra')
const path = require('path')
const log = require('../utils/logger.js')
const clusterEvents = require('../utils/clusterEvents.js')
const pitRunner = require('../pitRunner.js')
const Pit = require('../models/Pit-model.js')
const Node = require('../models/Node-model.js')
const Resource = require('../models/Resource-model.js')
const { getAlias } = require('../models/Alias-model.js')
const { getScript } = require('../utils/scripts.js')
const { ensureSignedIn, ensureAdmin, tryTargetNode, targetNode, targetGroup } = require('./mw.js')
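// Matches "resource:<type>,<index>,<name>" lines emitted by the node scan script.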
const resourceParser = /resource:([^,]*),([^,]*),([^,]*)/
async function getResourcesFromScan (pitId) {
let workers = await pitRunner.getResults(pitId)
if (workers.length <= 0) {
return
}
let resources = []
if (!(workers[0].result)) {
return resources
}
for (let line of workers[0].result.split('\n')) {
let match = resourceParser.exec(line)
if (match) {
let resource = Resource.build({
type: match[1],
name: match[3],
index: Number(match[2])
})
resources.push(resource)
}
}
return resources
}
var router = module.exports = new Router()
router.use(ensureSignedIn)
router.get('/', async (req, res) => {
res.status(200).send((await Node.findAll()).map(node => node.id))
})
router.get('/:node', targetNode, async (req, res) => {
let dbResources = await req.targetNode.getResources()
res.status(200).json({
id: req.targetNode.id,
endpoint: req.targetNode.endpoint,
online: req.targetNode.online,
since: req.targetNode.since,
resources: dbResources.length == 0 ? undefined : await Parallel.map(dbResources, async dbResource => {
let dbGroups = await dbResource.getResourcegroups()
return {
type: dbResource.type,
name: dbResource.name,
index: dbResource.index,
groups: dbGroups.length == 0 ? undefined : dbGroups.map(group => group.groupId),
alias: await getAlias(dbResource.name)
}
})
})
})
router.use(ensureAdmin)
router.put('/:node', tryTargetNode, async (req, res) => {
let id = req.params.node
let node = req.body
if (req.targetNode) {
res.status(400).send({ message: 'Node with same id already registered' })
} else if (node && node.endpoint && node.password) {
let pit
let dbnode
try {
dbnode = await Node.create({
id: id,
endpoint: node.endpoint,
password: node.password,
online: true,
since: Date.now(),
available: false
})
pit = await Pit.create()
await fs.writeFile(path.join(pit.getDir(), 'script.sh'), getScript('scan.sh'))
await pitRunner.runPit(pit.id, {}, [{
node: dbnode,
devices: { 'gpu': { type: 'gpu' } }
}])
let resources = await getResourcesFromScan(pit.id)
if (resources) {
resources.forEach(async resource => {
await resource.save()
await dbnode.addResource(resource)
})
dbnode.online = true
dbnode.available = true
await dbnode.save()
res.send()
} else {
throw new Error('Node scanning failed')
}
} catch (ex) {
if (dbnode) {
await dbnode.destroy()
}
res.status(400).send({ message: 'Problem adding node' })
} finally {
if (pit) {
await pit.destroy()
}
}
} else {
res.status(400).send()
}
})
router.delete('/:node', targetNode, async (req, res) => {
await req.targetNode.destroy()
res.send()
})
router.put('/:node/groups/:group', targetNode, targetGroup, async (req, res) => {
for (let resource of await req.targetNode.getResources()) {
await Resource.ResourceGroup.upsert({ resourceId: resource.id, groupId: req.targetGroup.id })
}
res.send()
clusterEvents.emit('restricted')
})
router.delete('/:node/groups/:group', targetNode, targetGroup, async (req, res) => {
for (let resource of await req.targetNode.getResources()) {
await Resource.ResourceGroup.destroy({ where: { resourceId: resource.id, groupId: req.targetGroup.id } })
}
res.send()
clusterEvents.emit('restricted')
})
async function targetResource (req, res) {
let targetResources = await req.targetNode.getResources({ where: { index: req.params.resource } })
req.targetResource = targetResources.length == 1 ? targetResources[0] : undefined
return req.targetResource ? Promise.resolve('next') : Promise.reject({ code: 404, message: 'Resource not found' })
}
router.put('/:node/resources/:resource/groups/:group', targetNode, targetResource, targetGroup, async (req, res) => {
await Resource.ResourceGroup.upsert({ resourceId: req.targetResource.id, groupId: req.targetGroup.id })
res.send()
clusterEvents.emit('restricted')
})
router.delete('/:node/resources/:resource/groups/:group', targetNode, targetResource, targetGroup, async (req, res) => {
await Resource.ResourceGroup.destroy({ where: { resourceId: req.targetResource.id, groupId: req.targetGroup.id } })
res.send()
clusterEvents.emit('restricted')
})
| 0 |
coqui_public_repos/STT | coqui_public_repos/STT/taskcluster/test-python_37_16k-linux-amd64-opt.yml | build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.7.6:m 16k"
workerType: "${docker.dsTests}"
metadata:
name: "DeepSpeech Linux AMD64 CPU Python v3.7 tests (16kHz)"
description: "Testing DeepSpeech for Linux/AMD64 on Python v3.7, CPU only, optimized version (16kHz)"
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/test/CMakeLists.txt | add_executable(fst_test
fst_test.cc
fst_test.h
)
target_link_libraries(fst_test fst ${CMAKE_DL_LIBS})
set_target_properties(fst_test PROPERTIES FOLDER test)
add_test(NAME fst_test-test COMMAND fst_test)
add_executable(weight_test
weight_test.cc
weight-tester.h
)
target_link_libraries(weight_test fst ${CMAKE_DL_LIBS})
set_target_properties(weight_test PROPERTIES FOLDER test)
add_test(NAME weight_test-test COMMAND weight_test)
add_executable(algo_test_log algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_log fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_log
PRIVATE TEST_LOG=1)
set_target_properties(algo_test_log PROPERTIES FOLDER test)
add_test(NAME algo_test_log-test COMMAND algo_test_log)
add_executable(algo_test_tropical algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_tropical fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_tropical
PRIVATE TEST_TROPICAL=1)
set_target_properties(algo_test_tropical PROPERTIES FOLDER test)
add_test(NAME algo_test_tropical-test COMMAND algo_test_tropical)
add_executable(algo_test_minmax algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_minmax fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_minmax
PRIVATE TEST_MINMAX=1)
set_target_properties(algo_test_minmax PROPERTIES FOLDER test)
add_test(NAME algo_test_minmax-test COMMAND algo_test_minmax)
add_executable(algo_test_lexicographic algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_lexicographic fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_lexicographic
PRIVATE TEST_LEXICOGRAPHIC=1)
set_target_properties(algo_test_lexicographic PROPERTIES FOLDER test)
add_test(NAME algo_test_lexicographic-test COMMAND algo_test_lexicographic)
add_executable(algo_test_power algo_test.cc algo_test.h rand-fst.h)
target_link_libraries(algo_test_power fst ${CMAKE_DL_LIBS})
target_compile_definitions(algo_test_power
PRIVATE TEST_POWER=1)
set_target_properties(algo_test_power PROPERTIES FOLDER test)
add_test(NAME algo_test_power-test COMMAND algo_test_power)
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text/decoder | coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text/decoder/lm/KenLM.h | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <memory>
#include "flashlight/lib/text/decoder/lm/LM.h"
#include "flashlight/lib/text/dictionary/Dictionary.h"
// Forward declarations to avoid including KenLM headers
namespace lm {
namespace base {
struct Vocabulary;
struct Model;
} // namespace base
namespace ngram {
struct State;
} // namespace ngram
} // namespace lm
namespace fl {
namespace lib {
namespace text {
/**
* KenLMState is a state object from KenLM, which contains context length,
 * indices and comparison functions
* https://github.com/kpu/kenlm/blob/master/lm/state.hh.
*/
struct KenLMState : LMState {
KenLMState();
std::unique_ptr<lm::ngram::State> ken_;
lm::ngram::State* ken() {
return ken_.get();
}
};
/**
* KenLM extends LM by using the toolkit https://kheafield.com/code/kenlm/.
*/
class KenLM : public LM {
public:
KenLM(const std::string& path, const Dictionary& usrTknDict);
LMStatePtr start(bool startWithNothing) override;
std::pair<LMStatePtr, float> score(
const LMStatePtr& state,
const int usrTokenIdx) override;
std::pair<LMStatePtr, float> finish(const LMStatePtr& state) override;
private:
std::shared_ptr<lm::base::Model> model_;
const lm::base::Vocabulary* vocab_;
};
using KenLMPtr = std::shared_ptr<KenLM>;
} // namespace text
} // namespace lib
} // namespace fl
| 0 |
coqui_public_repos/TTS/TTS/tts/utils/text | coqui_public_repos/TTS/TTS/tts/utils/text/phonemizers/gruut_wrapper.py | import importlib
from typing import List
import gruut
from gruut_ipa import IPA
from TTS.tts.utils.text.phonemizers.base import BasePhonemizer
from TTS.tts.utils.text.punctuation import Punctuation
# Table for str.translate to fix gruut/TTS phoneme mismatch
GRUUT_TRANS_TABLE = str.maketrans("g", "ɡ")
class Gruut(BasePhonemizer):
"""Gruut wrapper for G2P
Args:
language (str):
Valid language code for the used backend.
punctuations (str):
Characters to be treated as punctuation. Defaults to `Punctuation.default_puncs()`.
keep_puncs (bool):
If true, keep the punctuations after phonemization. Defaults to True.
use_espeak_phonemes (bool):
If true, use espeak lexicons instead of default Gruut lexicons. Defaults to False.
keep_stress (bool):
If true, keep the stress characters after phonemization. Defaults to False.
Example:
>>> from TTS.tts.utils.text.phonemizers.gruut_wrapper import Gruut
>>> phonemizer = Gruut('en-us')
>>> phonemizer.phonemize("Be a voice, not an! echo?", separator="|")
'b|i| ə| v|ɔ|ɪ|s, n|ɑ|t| ə|n! ɛ|k|o|ʊ?'
"""
def __init__(
self,
language: str,
punctuations=Punctuation.default_puncs(),
keep_puncs=True,
use_espeak_phonemes=False,
keep_stress=False,
):
super().__init__(language, punctuations=punctuations, keep_puncs=keep_puncs)
self.use_espeak_phonemes = use_espeak_phonemes
self.keep_stress = keep_stress
@staticmethod
def name():
return "gruut"
def phonemize_gruut(self, text: str, separator: str = "|", tie=False) -> str: # pylint: disable=unused-argument
"""Convert input text to phonemes.
        Gruut phonemizes the given `str` by separating each phoneme character with `separator`, even for characters
        that constitute a single sound.
It doesn't affect 🐸TTS since it individually converts each character to token IDs.
Examples::
"hello how are you today?" -> `h|ɛ|l|o|ʊ| h|a|ʊ| ɑ|ɹ| j|u| t|ə|d|e|ɪ`
Args:
text (str):
Text to be converted to phonemes.
tie (bool, optional) : When True use a '͡' character between
consecutive characters of a single phoneme. Else separate phoneme
                with '_'. This option requires espeak>=1.49. Defaults to False.
"""
ph_list = []
for sentence in gruut.sentences(text, lang=self.language, espeak=self.use_espeak_phonemes):
for word in sentence:
if word.is_break:
# Use actual character for break phoneme (e.g., comma)
if ph_list:
# Join with previous word
ph_list[-1].append(word.text)
else:
# First word is punctuation
ph_list.append([word.text])
elif word.phonemes:
# Add phonemes for word
word_phonemes = []
for word_phoneme in word.phonemes:
if not self.keep_stress:
# Remove primary/secondary stress
word_phoneme = IPA.without_stress(word_phoneme)
word_phoneme = word_phoneme.translate(GRUUT_TRANS_TABLE)
if word_phoneme:
# Flatten phonemes
word_phonemes.extend(word_phoneme)
if word_phonemes:
ph_list.append(word_phonemes)
ph_words = [separator.join(word_phonemes) for word_phonemes in ph_list]
ph = f"{separator} ".join(ph_words)
return ph
def _phonemize(self, text, separator):
return self.phonemize_gruut(text, separator, tie=False)
def is_supported_language(self, language):
"""Returns True if `language` is supported by the backend"""
return gruut.is_language_supported(language)
@staticmethod
def supported_languages() -> List:
"""Get a dictionary of supported languages.
Returns:
List: List of language codes.
"""
return list(gruut.get_supported_languages())
def version(self):
"""Get the version of the used backend.
Returns:
str: Version of the used backend.
"""
return gruut.__version__
@classmethod
def is_available(cls):
"""Return true if ESpeak is available else false"""
return importlib.util.find_spec("gruut") is not None
if __name__ == "__main__":
e = Gruut(language="en-us")
print(e.supported_languages())
print(e.version())
print(e.language)
print(e.name())
print(e.is_available())
e = Gruut(language="en-us", keep_puncs=False)
print("`" + e.phonemize("hello how are you today?") + "`")
e = Gruut(language="en-us", keep_puncs=True)
print("`" + e.phonemize("hello how, are you today?") + "`")
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/compress/compress-script.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Declarations of 'scriptable' versions of compression operations, that is,
// those that can be called with FstClass-type arguments.
#ifndef FST_EXTENSIONS_COMPRESS_COMPRESS_SCRIPT_H_
#define FST_EXTENSIONS_COMPRESS_COMPRESS_SCRIPT_H_
#include <string>
#include <tuple>
#include <fst/log.h>
#include <fst/extensions/compress/compress.h>
#include <fst/mutable-fst.h>
#include <fst/util.h>
#include <fst/script/fst-class.h>
namespace fst {
namespace script {
typedef std::tuple<const FstClass &, const string &, const bool> CompressArgs;
template <class Arc>
void Compress(CompressArgs *args) {
const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>());
const string &filename = std::get<1>(*args);
const bool gzip = std::get<2>(*args);
if (!fst::Compress(fst, filename, gzip)) FSTERROR() << "Compress: failed";
}
void Compress(const FstClass &fst, const string &filename, const bool gzip);
typedef std::tuple<const string &, MutableFstClass *, const bool>
DecompressArgs;
template <class Arc>
void Decompress(DecompressArgs *args) {
const string &filename = std::get<0>(*args);
MutableFst<Arc> *fst = std::get<1>(*args)->GetMutableFst<Arc>();
const bool gzip = std::get<2>(*args);
if (!fst::Decompress(filename, fst, gzip))
FSTERROR() << "Decompress: failed";
}
void Decompress(const string &filename, MutableFstClass *fst, const bool gzip);
} // namespace script
} // namespace fst
#endif // FST_EXTENSIONS_COMPRESS_COMPRESS_SCRIPT_H_
| 0 |
coqui_public_repos/inference-engine/third_party/cereal/include/cereal | coqui_public_repos/inference-engine/third_party/cereal/include/cereal/types/stack.hpp | /*! \file stack.hpp
\brief Support for types found in \<stack\>
\ingroup STLSupport */
/*
Copyright (c) 2014, Randolph Voorhies, Shane Grant
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of cereal nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CEREAL_TYPES_STACK_HPP_
#define CEREAL_TYPES_STACK_HPP_
#include "cereal/cereal.hpp"
#include <stack>
// The default container for stack is deque, so let's include that too
#include "cereal/types/deque.hpp"
namespace cereal
{
namespace stack_detail
{
//! Allows access to the protected container in stack
template <class T, class C> inline
C const & container( std::stack<T, C> const & stack )
{
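      // Locally derive from std::stack so the protected member 'c' (the
      // underlying container) becomes reachable via a pointer-to-member,
      // letting us read it without copying or modifying the stack.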
struct H : public std::stack<T, C>
{
static C const & get( std::stack<T, C> const & s )
{
return s.*(&H::c);
}
};
return H::get( stack );
}
}
//! Saving for std::stack
template <class Archive, class T, class C> inline
void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::stack<T, C> const & stack )
{
ar( CEREAL_NVP_("container", stack_detail::container( stack )) );
}
//! Loading for std::stack
template <class Archive, class T, class C> inline
void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::stack<T, C> & stack )
{
C container;
ar( CEREAL_NVP_("container", container) );
stack = std::stack<T, C>( std::move( container ) );
}
} // namespace cereal
#endif // CEREAL_TYPES_STACK_HPP_
| 0 |
coqui_public_repos/inference-engine/third_party/kenlm/lm | coqui_public_repos/inference-engine/third_party/kenlm/lm/wrappers/nplm.cc | #include "lm/wrappers/nplm.hh"
#include "util/exception.hh"
#include "util/file.hh"
#include <algorithm>
#include <cstring>
#include "neuralLM.h"
namespace lm {
namespace np {
Vocabulary::Vocabulary(const nplm::vocabulary &vocab)
: base::Vocabulary(vocab.lookup_word("<s>"), vocab.lookup_word("</s>"), vocab.lookup_word("<unk>")),
vocab_(vocab), null_word_(vocab.lookup_word("<null>")) {}
Vocabulary::~Vocabulary() {}
WordIndex Vocabulary::Index(const std::string &str) const {
return vocab_.lookup_word(str);
}
class Backend {
public:
Backend(const nplm::neuralLM &from, const std::size_t cache_size) : lm_(from), ngram_(from.get_order()) {
lm_.set_cache(cache_size);
}
nplm::neuralLM &LM() { return lm_; }
const nplm::neuralLM &LM() const { return lm_; }
Eigen::Matrix<int,Eigen::Dynamic,1> &staging_ngram() { return ngram_; }
double lookup_from_staging() { return lm_.lookup_ngram(ngram_); }
int order() const { return lm_.get_order(); }
private:
nplm::neuralLM lm_;
Eigen::Matrix<int,Eigen::Dynamic,1> ngram_;
};
bool Model::Recognize(const std::string &name) {
try {
util::scoped_fd file(util::OpenReadOrThrow(name.c_str()));
char magic_check[16];
util::ReadOrThrow(file.get(), magic_check, sizeof(magic_check));
const char nnlm_magic[] = "\\config\nversion ";
return !memcmp(magic_check, nnlm_magic, 16);
} catch (const util::Exception &) {
return false;
}
}
namespace {
nplm::neuralLM *LoadNPLM(const std::string &file) {
util::scoped_ptr<nplm::neuralLM> ret(new nplm::neuralLM());
ret->read(file);
return ret.release();
}
} // namespace
Model::Model(const std::string &file, std::size_t cache)
: base_instance_(LoadNPLM(file)), vocab_(base_instance_->get_vocabulary()), cache_size_(cache) {
  UTIL_THROW_IF(base_instance_->get_order() > NPLM_MAX_ORDER, util::Exception, "This NPLM has order " << (unsigned int)base_instance_->get_order() << " but the KenLM wrapper was compiled with " << NPLM_MAX_ORDER << ". Change the definition of NPLM_MAX_ORDER and recompile.");
// log10 compatible with backoff models.
base_instance_->set_log_base(10.0);
State begin_sentence, null_context;
std::fill(begin_sentence.words, begin_sentence.words + NPLM_MAX_ORDER - 1, base_instance_->lookup_word("<s>"));
null_word_ = base_instance_->lookup_word("<null>");
std::fill(null_context.words, null_context.words + NPLM_MAX_ORDER - 1, null_word_);
Init(begin_sentence, null_context, vocab_, base_instance_->get_order());
}
Model::~Model() {}
FullScoreReturn Model::FullScore(const State &from, const WordIndex new_word, State &out_state) const {
Backend *backend = backend_.get();
if (!backend) {
backend = new Backend(*base_instance_, cache_size_);
backend_.reset(backend);
}
// State is in natural word order.
FullScoreReturn ret;
for (int i = 0; i < backend->order() - 1; ++i) {
backend->staging_ngram()(i) = from.words[i];
}
backend->staging_ngram()(backend->order() - 1) = new_word;
ret.prob = backend->lookup_from_staging();
// Always say full order.
ret.ngram_length = backend->order();
// Shift everything down by one.
memcpy(out_state.words, from.words + 1, sizeof(WordIndex) * (backend->order() - 2));
out_state.words[backend->order() - 2] = new_word;
// Fill in trailing words with zeros so state comparison works.
memset(out_state.words + backend->order() - 1, 0, sizeof(WordIndex) * (NPLM_MAX_ORDER - backend->order()));
return ret;
}
// TODO: optimize with direct call?
FullScoreReturn Model::FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const {
// State is in natural word order. The API here specifies reverse order.
std::size_t state_length = std::min<std::size_t>(Order() - 1, context_rend - context_rbegin);
State state;
// Pad with null words.
for (lm::WordIndex *i = state.words; i < state.words + Order() - 1 - state_length; ++i) {
*i = null_word_;
}
// Put new words at the end.
std::reverse_copy(context_rbegin, context_rbegin + state_length, state.words + Order() - 1 - state_length);
return FullScore(state, new_word, out_state);
}
} // namespace np
} // namespace lm
| 0 |
coqui_public_repos/STT-models/indonesian/itml | coqui_public_repos/STT-models/indonesian/itml/v0.1.1/alphabet.txt |
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
r
s
t
u
v
w
x
y
z
| 0 |
coqui_public_repos/STT-models/luganda/itml | coqui_public_repos/STT-models/luganda/itml/v0.1.0/LICENSE | GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
| 0 |
coqui_public_repos/STT/native_client/java | coqui_public_repos/STT/native_client/java/libstt/CMakeLists.txt | # For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html
# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.4.1)
# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
# Gradle automatically packages shared libraries with your APK.
add_library( # Sets the name of the library.
stt-jni
# Sets the library as a shared library.
SHARED
# Provides a relative path to your source file(s).
../jni/stt_wrap.cpp )
add_library(stt-lib SHARED IMPORTED)
set_target_properties(stt-lib PROPERTIES
IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libstt.so)
add_library(kenlm-lib SHARED IMPORTED)
set_target_properties(kenlm-lib PROPERTIES
IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libkenlm.so)
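# Copy the prebuilt shared libraries next to the built JNI library after each
# build so Gradle packages them into the APK alongside libstt-jni.so.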
add_custom_command(TARGET stt-jni POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libstt.so
${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libstt.so)
add_custom_command(TARGET stt-jni POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libkenlm.so
${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libkenlm.so)
# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.
find_library( # Sets the name of the path variable.
log-lib
# Specifies the name of the NDK library that
# you want CMake to locate.
log )
# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
stt-jni
stt-lib
# Links the target library to the log library
# included in the NDK.
${log-lib} )
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/mpdt/mpdtlib.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// This is an experimental multi-pushdown transducer (MPDT) library. An MPDT is
// encoded as an FST, where some transitions are labeled with open or close
// parentheses, each mated pair of which is associated to one stack. To be
// interpreted as an MPDT, the parentheses within a stack must balance on a
// path.
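//
// A minimal sketch of the encoding (illustrative labels only, not code from
// this library): reserve labels 1/2 and 3/4 as two mated parenthesis pairs and
// assign each pair to its own stack, so 1...2 and 3...4 must balance
// independently along any accepted path:
//
//   std::vector<std::pair<Label, Label>> parens = {{1, 2}, {3, 4}};
//   std::vector<Label> assignments = {1, 2};  // Pair i is read against stack
//                                             // assignments[i].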
#ifndef FST_EXTENSIONS_MPDT_MPDTLIB_H_
#define FST_EXTENSIONS_MPDT_MPDTLIB_H_
#include <fst/extensions/mpdt/compose.h>
#include <fst/extensions/mpdt/expand.h>
#include <fst/extensions/mpdt/mpdt.h>
#include <fst/extensions/mpdt/reverse.h>
#endif // FST_EXTENSIONS_MPDT_MPDTLIB_H_
| 0 |
coqui_public_repos | coqui_public_repos/TTS/pyproject.toml | [build-system]
requires = [
"setuptools",
"wheel",
"cython~=0.29.30",
"numpy>=1.22.0",
"packaging",
]
[flake8]
max-line-length=120
[tool.black]
line-length = 120
target-version = ['py39']
[tool.isort]
line_length = 120
profile = "black"
multi_line_output = 3
| 0 |
coqui_public_repos/TTS/TTS | coqui_public_repos/TTS/TTS/utils/callbacks.py | class TrainerCallback:
@staticmethod
def on_init_start(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_init_start"):
trainer.model.module.on_init_start(trainer)
else:
if hasattr(trainer.model, "on_init_start"):
trainer.model.on_init_start(trainer)
if hasattr(trainer.criterion, "on_init_start"):
trainer.criterion.on_init_start(trainer)
if hasattr(trainer.optimizer, "on_init_start"):
trainer.optimizer.on_init_start(trainer)
@staticmethod
def on_init_end(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_init_end"):
trainer.model.module.on_init_end(trainer)
else:
if hasattr(trainer.model, "on_init_end"):
trainer.model.on_init_end(trainer)
if hasattr(trainer.criterion, "on_init_end"):
trainer.criterion.on_init_end(trainer)
if hasattr(trainer.optimizer, "on_init_end"):
trainer.optimizer.on_init_end(trainer)
@staticmethod
def on_epoch_start(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_epoch_start"):
trainer.model.module.on_epoch_start(trainer)
else:
if hasattr(trainer.model, "on_epoch_start"):
trainer.model.on_epoch_start(trainer)
if hasattr(trainer.criterion, "on_epoch_start"):
trainer.criterion.on_epoch_start(trainer)
if hasattr(trainer.optimizer, "on_epoch_start"):
trainer.optimizer.on_epoch_start(trainer)
@staticmethod
def on_epoch_end(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_epoch_end"):
trainer.model.module.on_epoch_end(trainer)
else:
if hasattr(trainer.model, "on_epoch_end"):
trainer.model.on_epoch_end(trainer)
if hasattr(trainer.criterion, "on_epoch_end"):
trainer.criterion.on_epoch_end(trainer)
if hasattr(trainer.optimizer, "on_epoch_end"):
trainer.optimizer.on_epoch_end(trainer)
@staticmethod
def on_train_step_start(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_train_step_start"):
trainer.model.module.on_train_step_start(trainer)
else:
if hasattr(trainer.model, "on_train_step_start"):
trainer.model.on_train_step_start(trainer)
if hasattr(trainer.criterion, "on_train_step_start"):
trainer.criterion.on_train_step_start(trainer)
if hasattr(trainer.optimizer, "on_train_step_start"):
trainer.optimizer.on_train_step_start(trainer)
@staticmethod
def on_train_step_end(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_train_step_end"):
trainer.model.module.on_train_step_end(trainer)
else:
if hasattr(trainer.model, "on_train_step_end"):
trainer.model.on_train_step_end(trainer)
if hasattr(trainer.criterion, "on_train_step_end"):
trainer.criterion.on_train_step_end(trainer)
if hasattr(trainer.optimizer, "on_train_step_end"):
trainer.optimizer.on_train_step_end(trainer)
@staticmethod
def on_keyboard_interrupt(trainer) -> None:
if hasattr(trainer.model, "module"):
if hasattr(trainer.model.module, "on_keyboard_interrupt"):
trainer.model.module.on_keyboard_interrupt(trainer)
else:
if hasattr(trainer.model, "on_keyboard_interrupt"):
trainer.model.on_keyboard_interrupt(trainer)
if hasattr(trainer.criterion, "on_keyboard_interrupt"):
trainer.criterion.on_keyboard_interrupt(trainer)
if hasattr(trainer.optimizer, "on_keyboard_interrupt"):
trainer.optimizer.on_keyboard_interrupt(trainer)
| 0 |
coqui_public_repos/snakepit/src | coqui_public_repos/snakepit/src/utils/lxd.js | const https = require('https')
const WebSocket = require('ws')
const axios = require('axios')
const assign = require('assign-deep')
const Parallel = require('async-parallel')
const log = require('../utils/logger.js')
const { to } = require('../utils/async.js')
const config = require('../config.js')
const lxdStatus = {
created: 100,
started: 101,
stopped: 102,
running: 103,
canceling: 104,
pending: 105,
starting: 106,
stopping: 107,
aborting: 108,
freezing: 109,
frozen: 110,
thawed: 111,
success: 200,
failure: 400,
cancelled: 401
}
var exports = module.exports = {}
var agent = new https.Agent({
key: config.clientKey,
cert: config.clientCert,
rejectUnauthorized: false
})
function getUrl (endpoint, resource) {
return endpoint + '/1.0' + (resource ? ('/' + resource) : '')
}
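// Normalizes an LXD API response: 'sync' results are unwrapped to their
// metadata, 'error' results are thrown, and 'async' operations are either
// awaited via the operation's /wait endpoint or, when options.openSocket is
// set, resolved to a map of named WebSockets connected to the operation's
// file descriptors.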
async function wrapLxdResponse (endpoint, promise, options) {
let response
try {
response = await promise
} catch (ex) {
log.debug('LXD error', ex.response && ex.response.data)
throw ex
}
let data = response.data
if (typeof data === 'string' || data instanceof String) {
return data
} else if (typeof data === 'object') {
switch(data.type) {
case 'sync':
if (data.metadata) {
if (data.metadata.err) {
throw data.metadata.err
}
return data.metadata
} else {
return data
}
case 'async':
if (options && options.openSocket) {
log.debug('Opening socket:', data.operation + '/websocket')
if (data.metadata && data.metadata.metadata && data.metadata.metadata.fds) {
let wsEndpoint = endpoint.startsWith('http') ? ('ws' + endpoint.slice(4)) : endpoint
let names = Object.keys(data.metadata.metadata.fds)
let sockets = {}
await Parallel.each(names, name => new Promise((resolve, reject) => {
try {
let wsc = new WebSocket(
wsEndpoint + data.operation + '/websocket?secret=' + data.metadata.metadata.fds[name],
null,
{ agent: agent }
)
wsc.on('open', () => resolve(wsc))
wsc.on('error', reject)
sockets[name] = wsc
} catch (ex) {
reject(ex)
}
}))
return sockets
} else {
throw "Unable to open web-socket"
}
} else {
log.debug('Forwarding:', data.operation + '/wait')
return await wrapLxdResponse(endpoint, axios.get(endpoint + data.operation + '/wait', { httpsAgent: agent }), options)
}
case 'error':
log.debug('LXD error', data.error)
throw data.error
}
}
}
function callLxd(method, endpoint, resource, data, options) {
let axiosConfig = assign({
method: method,
url: getUrl(endpoint, resource),
httpsAgent: agent,
data: data,
timeout: config.lxdTimeout
}, options || {})
log.debug(method, axiosConfig.url, data || '')
return wrapLxdResponse(endpoint, axios(axiosConfig), options)
}
exports.get = function (endpoint, resource, options) {
return callLxd('get', endpoint, resource, undefined, options)
}
exports.delete = function (endpoint, resource, options) {
return callLxd('delete', endpoint, resource, undefined, options)
}
exports.put = function (endpoint, resource, data, options) {
return callLxd('put', endpoint, resource, data, options)
}
exports.post = function (endpoint, resource, data, options) {
return callLxd('post', endpoint, resource, data, options)
}
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/bin/fstreplace.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/flags.h>
DEFINE_string(call_arc_labeling, "input",
"Which labels to make non-epsilon on the call arc. "
"One of: \"input\" (default), \"output\", \"both\", \"neither\"");
DEFINE_string(return_arc_labeling, "neither",
"Which labels to make non-epsilon on the return arc. "
"One of: \"input\", \"output\", \"both\", \"neither\" (default)");
DEFINE_int64(return_label, 0, "Label to put on return arc");
DEFINE_bool(epsilon_on_replace, false, "Call/return arcs are epsilon arcs?");
int fstreplace_main(int argc, char **argv);
int main(int argc, char **argv) { return fstreplace_main(argc, argv); }
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/pdt/pdtscript.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Convenience file for including all PDT operations at once, and/or
// registering them for new arc types.
#ifndef FST_EXTENSIONS_PDT_PDTSCRIPT_H_
#define FST_EXTENSIONS_PDT_PDTSCRIPT_H_
#include <algorithm>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/compose.h> // for ComposeOptions
#include <fst/util.h>
#include <fst/script/arg-packs.h>
#include <fst/script/fstscript.h>
#include <fst/script/shortest-path.h>
#include <fst/extensions/pdt/compose.h>
#include <fst/extensions/pdt/expand.h>
#include <fst/extensions/pdt/info.h>
#include <fst/extensions/pdt/replace.h>
#include <fst/extensions/pdt/reverse.h>
#include <fst/extensions/pdt/shortest-path.h>
namespace fst {
namespace script {
using PdtComposeArgs =
std::tuple<const FstClass &, const FstClass &,
const std::vector<LabelPair> &, MutableFstClass *,
const PdtComposeOptions &, bool>;
template <class Arc>
void PdtCompose(PdtComposeArgs *args) {
const Fst<Arc> &ifst1 = *(std::get<0>(*args).GetFst<Arc>());
const Fst<Arc> &ifst2 = *(std::get<1>(*args).GetFst<Arc>());
MutableFst<Arc> *ofst = std::get<3>(*args)->GetMutableFst<Arc>();
// In case Arc::Label is not the same as FstClass::Label, we make a
// copy. Truncation may occur if FstClass::Label has more precision than
// Arc::Label.
std::vector<std::pair<typename Arc::Label, typename Arc::Label>> typed_parens(
std::get<2>(*args).size());
std::copy(std::get<2>(*args).begin(), std::get<2>(*args).end(),
typed_parens.begin());
if (std::get<5>(*args)) {
Compose(ifst1, typed_parens, ifst2, ofst, std::get<4>(*args));
} else {
Compose(ifst1, ifst2, typed_parens, ofst, std::get<4>(*args));
}
}
void PdtCompose(const FstClass &ifst1, const FstClass &ifst2,
const std::vector<LabelPair> &parens,
MutableFstClass *ofst, const PdtComposeOptions &opts,
bool left_pdt);
struct PdtExpandOptions {
bool connect;
bool keep_parentheses;
const WeightClass &weight_threshold;
PdtExpandOptions(bool c, bool k, const WeightClass &w)
: connect(c), keep_parentheses(k), weight_threshold(w) {}
};
using PdtExpandArgs =
std::tuple<const FstClass &, const std::vector<LabelPair> &,
MutableFstClass *, const PdtExpandOptions &>;
template <class Arc>
void PdtExpand(PdtExpandArgs *args) {
const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>());
MutableFst<Arc> *ofst = std::get<2>(*args)->GetMutableFst<Arc>();
// In case Arc::Label is not the same as FstClass::Label, we make a
// copy. Truncation may occur if FstClass::Label has more precision than
// Arc::Label.
std::vector<std::pair<typename Arc::Label, typename Arc::Label>> typed_parens(
std::get<1>(*args).size());
std::copy(std::get<1>(*args).begin(), std::get<1>(*args).end(),
typed_parens.begin());
Expand(fst, typed_parens, ofst,
fst::PdtExpandOptions<Arc>(
std::get<3>(*args).connect, std::get<3>(*args).keep_parentheses,
*(std::get<3>(*args)
.weight_threshold.GetWeight<typename Arc::Weight>())));
}
void PdtExpand(const FstClass &ifst, const std::vector<LabelPair> &parens,
MutableFstClass *ofst, const PdtExpandOptions &opts);
void PdtExpand(const FstClass &ifst, const std::vector<LabelPair> &parens,
MutableFstClass *ofst, bool connect, bool keep_parentheses,
const WeightClass &weight_threshold);
using PdtReplaceArgs =
std::tuple<const std::vector<LabelFstClassPair> &, MutableFstClass *,
std::vector<LabelPair> *, int64_t, PdtParserType, int64_t,
const string &, const string &>;
template <class Arc>
void PdtReplace(PdtReplaceArgs *args) {
const auto &untyped_pairs = std::get<0>(*args);
auto size = untyped_pairs.size();
std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> typed_pairs(
size);
for (size_t i = 0; i < size; ++i) {
typed_pairs[i].first = untyped_pairs[i].first;
typed_pairs[i].second = untyped_pairs[i].second->GetFst<Arc>();
}
MutableFst<Arc> *ofst = std::get<1>(*args)->GetMutableFst<Arc>();
std::vector<std::pair<typename Arc::Label, typename Arc::Label>> typed_parens;
const PdtReplaceOptions<Arc> opts(std::get<3>(*args), std::get<4>(*args),
std::get<5>(*args), std::get<6>(*args),
std::get<7>(*args));
Replace(typed_pairs, ofst, &typed_parens, opts);
// Copies typed parens into arg3.
std::get<2>(*args)->resize(typed_parens.size());
std::copy(typed_parens.begin(), typed_parens.end(),
std::get<2>(*args)->begin());
}
void PdtReplace(const std::vector<LabelFstClassPair> &pairs,
MutableFstClass *ofst, std::vector<LabelPair> *parens,
int64_t root, PdtParserType parser_type = PDT_LEFT_PARSER,
int64_t start_paren_labels = kNoLabel,
const string &left_paren_prefix = "(_",
const string &right_paren_prefix = "_)");
using PdtReverseArgs =
std::tuple<const FstClass &, const std::vector<LabelPair> &,
MutableFstClass *>;
template <class Arc>
void PdtReverse(PdtReverseArgs *args) {
const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>());
MutableFst<Arc> *ofst = std::get<2>(*args)->GetMutableFst<Arc>();
// In case Arc::Label is not the same as FstClass::Label, we make a
// copy. Truncation may occur if FstClass::Label has more precision than
// Arc::Label.
std::vector<std::pair<typename Arc::Label, typename Arc::Label>> typed_parens(
std::get<1>(*args).size());
std::copy(std::get<1>(*args).begin(), std::get<1>(*args).end(),
typed_parens.begin());
Reverse(fst, typed_parens, ofst);
}
void PdtReverse(const FstClass &ifst, const std::vector<LabelPair> &,
MutableFstClass *ofst);
// PDT SHORTESTPATH
struct PdtShortestPathOptions {
QueueType queue_type;
bool keep_parentheses;
bool path_gc;
PdtShortestPathOptions(QueueType qt = FIFO_QUEUE, bool kp = false,
bool gc = true)
: queue_type(qt), keep_parentheses(kp), path_gc(gc) {}
};
using PdtShortestPathArgs =
std::tuple<const FstClass &, const std::vector<LabelPair> &,
MutableFstClass *, const PdtShortestPathOptions &>;
template <class Arc>
void PdtShortestPath(PdtShortestPathArgs *args) {
const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>());
MutableFst<Arc> *ofst = std::get<2>(*args)->GetMutableFst<Arc>();
const PdtShortestPathOptions &opts = std::get<3>(*args);
// In case Arc::Label is not the same as FstClass::Label, we make a
// copy. Truncation may occur if FstClass::Label has more precision than
// Arc::Label.
std::vector<std::pair<typename Arc::Label, typename Arc::Label>> typed_parens(
std::get<1>(*args).size());
std::copy(std::get<1>(*args).begin(), std::get<1>(*args).end(),
typed_parens.begin());
switch (opts.queue_type) {
default:
FSTERROR() << "Unknown queue type: " << opts.queue_type;
case FIFO_QUEUE: {
using Queue = FifoQueue<typename Arc::StateId>;
fst::PdtShortestPathOptions<Arc, Queue> spopts(opts.keep_parentheses,
opts.path_gc);
ShortestPath(fst, typed_parens, ofst, spopts);
return;
}
case LIFO_QUEUE: {
using Queue = LifoQueue<typename Arc::StateId>;
fst::PdtShortestPathOptions<Arc, Queue> spopts(opts.keep_parentheses,
opts.path_gc);
ShortestPath(fst, typed_parens, ofst, spopts);
return;
}
case STATE_ORDER_QUEUE: {
using Queue = StateOrderQueue<typename Arc::StateId>;
fst::PdtShortestPathOptions<Arc, Queue> spopts(opts.keep_parentheses,
opts.path_gc);
ShortestPath(fst, typed_parens, ofst, spopts);
return;
}
}
}
void PdtShortestPath(const FstClass &ifst,
const std::vector<LabelPair> &parens, MutableFstClass *ofst,
const PdtShortestPathOptions &opts = PdtShortestPathOptions());
// PRINT INFO
using PrintPdtInfoArgs =
std::pair<const FstClass &, const std::vector<LabelPair> &>;
template <class Arc>
void PrintPdtInfo(PrintPdtInfoArgs *args) {
const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>());
// In case Arc::Label is not the same as FstClass::Label, we make a
// copy. Truncation may occur if FstClass::Label has more precision than
// Arc::Label.
std::vector<std::pair<typename Arc::Label, typename Arc::Label>> typed_parens(
std::get<1>(*args).size());
std::copy(std::get<1>(*args).begin(), std::get<1>(*args).end(),
typed_parens.begin());
PdtInfo<Arc> pdtinfo(fst, typed_parens);
PrintPdtInfo(pdtinfo);
}
void PrintPdtInfo(const FstClass &ifst, const std::vector<LabelPair> &parens);
} // namespace script
} // namespace fst
#define REGISTER_FST_PDT_OPERATIONS(ArcType) \
REGISTER_FST_OPERATION(PdtCompose, ArcType, PdtComposeArgs); \
REGISTER_FST_OPERATION(PdtExpand, ArcType, PdtExpandArgs); \
REGISTER_FST_OPERATION(PdtReplace, ArcType, PdtReplaceArgs); \
REGISTER_FST_OPERATION(PdtReverse, ArcType, PdtReverseArgs); \
REGISTER_FST_OPERATION(PdtShortestPath, ArcType, PdtShortestPathArgs); \
REGISTER_FST_OPERATION(PrintPdtInfo, ArcType, PrintPdtInfoArgs)
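
// Typical usage (a sketch; assumes the underlying operations for the arc type
// are available to the script-level registry):
//
//   REGISTER_FST_PDT_OPERATIONS(StdArc);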
#endif // FST_EXTENSIONS_PDT_PDTSCRIPT_H_
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/state-table.h | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes for representing the mapping between state tuples and state IDs.
#ifndef FST_STATE_TABLE_H_
#define FST_STATE_TABLE_H_
#include <deque>
#include <utility>
#include <vector>
#include <fst/log.h>
#include <fst/bi-table.h>
#include <fst/expanded-fst.h>
#include <fst/filter-state.h>
namespace fst {
// State tables determine the bijective mapping between state tuples (e.g., in
// composition, triples of two FST states and a composition filter state) and
// their corresponding state IDs. They are classes, templated on state tuples,
// with the following interface:
//
// template <class T>
// class StateTable {
// public:
// using StateTuple = T;
//
// // Required constructors.
// StateTable();
//
// StateTable(const StateTable &);
//
//   // Looks up the state ID by tuple. If it doesn't exist, then adds it.
// StateId FindState(const StateTuple &tuple);
//
// // Looks up state tuple by state ID.
// const StateTuple<StateId> &Tuple(StateId s) const;
//
// // # of stored tuples.
// StateId Size() const;
// };
//
// A state tuple has the form:
//
// template <class S>
// struct StateTuple {
// using StateId = S;
//
// // Required constructors.
//
// StateTuple();
//
// StateTuple(const StateTuple &tuple);
// };
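//
// A minimal usage sketch (illustrative; any table/tuple pair satisfying the
// interfaces above will do):
//
//   CompactHashStateTable<Tuple, ComposeHash<Tuple>> table;
//   StateId s = table.FindState(tuple);  // Adds the tuple on first lookup.
//   const Tuple &t = table.Tuple(s);     // Inverse mapping from ID to tuple.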
// An implementation using a hash map for the tuple to state ID mapping. The
// state tuple T must support operator==.
template <class T, class H>
class HashStateTable : public HashBiTable<typename T::StateId, T, H> {
public:
using StateTuple = T;
using StateId = typename StateTuple::StateId;
using HashBiTable<StateId, StateTuple, H>::FindId;
using HashBiTable<StateId, StateTuple, H>::FindEntry;
using HashBiTable<StateId, StateTuple, H>::Size;
HashStateTable() : HashBiTable<StateId, StateTuple, H>() {}
explicit HashStateTable(size_t table_size)
: HashBiTable<StateId, StateTuple, H>(table_size) {}
StateId FindState(const StateTuple &tuple) { return FindId(tuple); }
const StateTuple &Tuple(StateId s) const { return FindEntry(s); }
};
// An implementation using a compact hash map for the tuple to state ID
// mapping. The state tuple T must support operator==.
template <class T, class H>
class CompactHashStateTable
: public CompactHashBiTable<typename T::StateId, T, H> {
public:
using StateTuple = T;
using StateId = typename StateTuple::StateId;
using CompactHashBiTable<StateId, StateTuple, H>::FindId;
using CompactHashBiTable<StateId, StateTuple, H>::FindEntry;
using CompactHashBiTable<StateId, StateTuple, H>::Size;
CompactHashStateTable() : CompactHashBiTable<StateId, StateTuple, H>() {}
explicit CompactHashStateTable(size_t table_size)
: CompactHashBiTable<StateId, StateTuple, H>(table_size) {}
StateId FindState(const StateTuple &tuple) { return FindId(tuple); }
const StateTuple &Tuple(StateId s) const { return FindEntry(s); }
};
// An implementation using a vector for the tuple to state mapping. It is
// passed a fingerprint functor that should fingerprint tuples uniquely to an
// integer that can used as a vector index. Normally, VectorStateTable
// constructs the fingerprint functor. Alternately, the user can pass this
// object, in which case the table takes ownership.
template <class T, class FP>
class VectorStateTable : public VectorBiTable<typename T::StateId, T, FP> {
public:
using StateTuple = T;
using StateId = typename StateTuple::StateId;
using VectorBiTable<StateId, StateTuple, FP>::FindId;
using VectorBiTable<StateId, StateTuple, FP>::FindEntry;
using VectorBiTable<StateId, StateTuple, FP>::Size;
using VectorBiTable<StateId, StateTuple, FP>::Fingerprint;
explicit VectorStateTable(FP *fingerprint = nullptr, size_t table_size = 0)
: VectorBiTable<StateId, StateTuple, FP>(fingerprint, table_size) {}
StateId FindState(const StateTuple &tuple) { return FindId(tuple); }
const StateTuple &Tuple(StateId s) const { return FindEntry(s); }
};
// An implementation using a vector and a compact hash table. The selection
// functor returns true for tuples to be hashed in the vector. The fingerprint
// functor should fingerprint tuples uniquely to an integer that can be used as
// a vector index. A hash functor is used when hashing tuples into the compact
// hash table.
template <class T, class Select, class FP, class H>
class VectorHashStateTable
: public VectorHashBiTable<typename T::StateId, T, Select, FP, H> {
public:
using StateTuple = T;
using StateId = typename StateTuple::StateId;
using VectorHashBiTable<StateId, StateTuple, Select, FP, H>::FindId;
using VectorHashBiTable<StateId, StateTuple, Select, FP, H>::FindEntry;
using VectorHashBiTable<StateId, StateTuple, Select, FP, H>::Size;
using VectorHashBiTable<StateId, StateTuple, Select, FP, H>::Selector;
using VectorHashBiTable<StateId, StateTuple, Select, FP, H>::Fingerprint;
using VectorHashBiTable<StateId, StateTuple, Select, FP, H>::Hash;
VectorHashStateTable(Select *select, FP *fingerprint, H *hash,
size_t vector_size = 0, size_t tuple_size = 0)
: VectorHashBiTable<StateId, StateTuple, Select, FP, H>(
select, fingerprint, hash, vector_size, tuple_size) {}
StateId FindState(const StateTuple &tuple) { return FindId(tuple); }
const StateTuple &Tuple(StateId s) const { return FindEntry(s); }
};
// An implementation using a hash map to map from tuples to state IDs. This
// version permits erasing of states. The state tuple's default constructor
// must produce a tuple that will never be seen and the tuple must support
// operator==.
template <class T, class H>
class ErasableStateTable : public ErasableBiTable<typename T::StateId, T, H> {
public:
using StateTuple = T;
using StateId = typename StateTuple::StateId;
using ErasableBiTable<StateId, StateTuple, H>::FindId;
using ErasableBiTable<StateId, StateTuple, H>::FindEntry;
using ErasableBiTable<StateId, StateTuple, H>::Size;
using ErasableBiTable<StateId, StateTuple, H>::Erase;
ErasableStateTable() : ErasableBiTable<StateId, StateTuple, H>() {}
StateId FindState(const StateTuple &tuple) { return FindId(tuple); }
const StateTuple &Tuple(StateId s) const { return FindEntry(s); }
};
// The composition state table has the form:
//
// template <class Arc, class FilterState>
// class ComposeStateTable {
// public:
// using StateId = typename Arc::StateId;
//
// // Required constructors.
//
// ComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2);
// ComposeStateTable(const ComposeStateTable<Arc, FilterState> &table);
//
//   // Looks up a state ID by tuple, adding it if it doesn't exist.
// StateId FindState(const StateTuple &tuple);
//
// // Looks up a tuple by state ID.
// const ComposeStateTuple<StateId> &Tuple(StateId s) const;
//
//   // The number of stored tuples.
// StateId Size() const;
//
//   // Returns true if an error was encountered.
// bool Error() const;
// };
//
// The following interface is used to represent the composition state.
//
// template <class S, class FS>
// class CompositionStateTuple {
// public:
// using StateId = typename StateId;
// using FS = FilterState;
//
// // Required constructors.
// StateTuple();
// StateTuple(StateId s1, StateId s2, const FilterState &fs);
//
// StateId StateId1() const;
// StateId StateId2() const;
//
// FilterState GetFilterState() const;
//
// std::pair<StateId, StateId> StatePair() const;
//
// size_t Hash() const;
//
// friend bool operator==(const StateTuple& x, const StateTuple &y);
// }
//
template <typename S, typename FS>
class DefaultComposeStateTuple {
public:
using StateId = S;
using FilterState = FS;
DefaultComposeStateTuple()
: state_pair_(kNoStateId, kNoStateId), fs_(FilterState::NoState()) {}
DefaultComposeStateTuple(StateId s1, StateId s2, const FilterState &fs)
: state_pair_(s1, s2), fs_(fs) {}
StateId StateId1() const { return state_pair_.first; }
StateId StateId2() const { return state_pair_.second; }
FilterState GetFilterState() const { return fs_; }
const std::pair<StateId, StateId> &StatePair() const { return state_pair_; }
friend bool operator==(const DefaultComposeStateTuple &x,
const DefaultComposeStateTuple &y) {
return (&x == &y) || (x.state_pair_ == y.state_pair_ && x.fs_ == y.fs_);
}
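  // Combines both state IDs and the filter-state hash with distinct prime
  // multipliers (7853, 7867) to spread tuples across hash buckets.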
size_t Hash() const {
return static_cast<size_t>(StateId1()) +
static_cast<size_t>(StateId2()) * 7853u +
GetFilterState().Hash() * 7867u;
}
private:
std::pair<StateId, StateId> state_pair_;
FilterState fs_; // State of composition filter.
};
// Specialization for TrivialFilterState that does not explicitly store the
// filter state since it is always the unique non-blocking state.
template <typename S>
class DefaultComposeStateTuple<S, TrivialFilterState> {
public:
using StateId = S;
using FilterState = TrivialFilterState;
DefaultComposeStateTuple()
: state_pair_(kNoStateId, kNoStateId) {}
DefaultComposeStateTuple(StateId s1, StateId s2, const FilterState &)
: state_pair_(s1, s2) {}
StateId StateId1() const { return state_pair_.first; }
StateId StateId2() const { return state_pair_.second; }
FilterState GetFilterState() const { return FilterState(true); }
const std::pair<StateId, StateId> &StatePair() const { return state_pair_; }
friend bool operator==(const DefaultComposeStateTuple &x,
const DefaultComposeStateTuple &y) {
return (&x == &y) || (x.state_pair_ == y.state_pair_);
}
size_t Hash() const { return StateId1() + StateId2() * 7853; }
private:
std::pair<StateId, StateId> state_pair_;
};
// Hashing of composition state tuples.
template <typename T>
class ComposeHash {
public:
size_t operator()(const T &t) const { return t.Hash(); }
};
// A HashStateTable over composition tuples.
template <typename Arc, typename FilterState,
typename StateTuple =
DefaultComposeStateTuple<typename Arc::StateId, FilterState>,
typename StateTable =
CompactHashStateTable<StateTuple, ComposeHash<StateTuple>>>
class GenericComposeStateTable : public StateTable {
public:
using StateId = typename Arc::StateId;
GenericComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2) {}
GenericComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
size_t table_size)
: StateTable(table_size) {}
constexpr bool Error() const { return false; }
private:
GenericComposeStateTable &operator=(const GenericComposeStateTable &table) =
delete;
};
// Fingerprint for general composition tuples.
template <typename StateTuple>
class ComposeFingerprint {
public:
using StateId = typename StateTuple::StateId;
// Required but suboptimal constructor.
ComposeFingerprint() : mult1_(8192), mult2_(8192) {
LOG(WARNING) << "TupleFingerprint: # of FST states should be provided.";
}
// Constructor is provided the sizes of the input FSTs.
ComposeFingerprint(StateId nstates1, StateId nstates2)
: mult1_(nstates1), mult2_(nstates1 * nstates2) {}
size_t operator()(const StateTuple &tuple) {
return tuple.StateId1() + tuple.StateId2() * mult1_ +
tuple.GetFilterState().Hash() * mult2_;
}
private:
const std::ptrdiff_t mult1_;
const std::ptrdiff_t mult2_;
};
// Useful when the first composition state determines the tuple.
template <typename StateTuple>
class ComposeState1Fingerprint {
public:
size_t operator()(const StateTuple &tuple) { return tuple.StateId1(); }
};
// Useful when the second composition state determines the tuple.
template <typename StateTuple>
class ComposeState2Fingerprint {
public:
size_t operator()(const StateTuple &tuple) { return tuple.StateId2(); }
};
// A VectorStateTable over composition tuples. This can be used when the
// product of number of states in FST1 and FST2 (and the composition filter
// state hash) is manageable. If the FSTs are not expanded FSTs, they will
// first have their states counted.
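// For example (illustrative numbers): composing a 1,000-state FST with a
// 2,000-state FST yields fingerprints up to roughly 2,000,000 times the
// filter-state hash, so the backing vector is only practical when that
// product stays manageable.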
template <typename Arc, typename StateTuple>
class ProductComposeStateTable
: public VectorStateTable<StateTuple, ComposeFingerprint<StateTuple>> {
public:
using StateId = typename Arc::StateId;
using StateTable =
VectorStateTable<StateTuple, ComposeFingerprint<StateTuple>>;
ProductComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
size_t table_size = 0)
: StateTable(new ComposeFingerprint<StateTuple>(CountStates(fst1),
CountStates(fst2)),
table_size) {}
ProductComposeStateTable(
const ProductComposeStateTable<Arc, StateTuple> &table)
: StateTable(new ComposeFingerprint<StateTuple>(table.Fingerprint())) {}
constexpr bool Error() const { return false; }
private:
ProductComposeStateTable &operator=(const ProductComposeStateTable &table) =
delete;
};
// A vector-backed table over composition tuples which can be used when the
// first FST is a string (i.e., satisfies kString property) and the second is
// deterministic and epsilon-free. It should be used with a composition filter
// that creates at most one filter state per tuple under these conditions (e.g.,
// SequenceComposeFilter or MatchComposeFilter).
template <typename Arc, typename StateTuple>
class StringDetComposeStateTable
: public VectorStateTable<StateTuple,
ComposeState1Fingerprint<StateTuple>> {
public:
using StateId = typename Arc::StateId;
using StateTable =
VectorStateTable<StateTuple, ComposeState1Fingerprint<StateTuple>>;
StringDetComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2)
: error_(false) {
static constexpr auto props2 = kIDeterministic | kNoIEpsilons;
if (fst1.Properties(kString, true) != kString) {
FSTERROR() << "StringDetComposeStateTable: 1st FST is not a string";
error_ = true;
} else if (fst2.Properties(props2, true) != props2) {
FSTERROR() << "StringDetComposeStateTable: 2nd FST is not deterministic "
"and epsilon-free";
error_ = true;
}
}
StringDetComposeStateTable(
const StringDetComposeStateTable<Arc, StateTuple> &table)
: StateTable(table), error_(table.error_) {}
bool Error() const { return error_; }
private:
bool error_;
StringDetComposeStateTable &operator=(const StringDetComposeStateTable &) =
delete;
};
// A vector-backed table over composition tuples which can be used when the
// first FST is deterministic and epsilon-free and the second is a string (i.e.,
// satisfies kString). It should be used with a composition filter that creates
// at most one filter state per tuple under these conditions (e.g.,
// SequenceComposeFilter or MatchComposeFilter).
template <typename Arc, typename StateTuple>
class DetStringComposeStateTable
: public VectorStateTable<StateTuple,
ComposeState2Fingerprint<StateTuple>> {
public:
using StateId = typename Arc::StateId;
using StateTable =
VectorStateTable<StateTuple, ComposeState2Fingerprint<StateTuple>>;
DetStringComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2)
: error_(false) {
static constexpr auto props = kODeterministic | kNoOEpsilons;
if (fst1.Properties(props, true) != props) {
FSTERROR() << "StringDetComposeStateTable: 1st FST is not "
<< "input-deterministic and epsilon-free";
error_ = true;
} else if (fst2.Properties(kString, true) != kString) {
FSTERROR() << "DetStringComposeStateTable: 2nd FST is not a string";
error_ = true;
}
}
DetStringComposeStateTable(
const DetStringComposeStateTable<Arc, StateTuple> &table)
: StateTable(table), error_(table.error_) {}
bool Error() const { return error_; }
private:
bool error_;
DetStringComposeStateTable &operator=(const DetStringComposeStateTable &) =
delete;
};
// An erasable table over composition tuples. The Erase(StateId) method can be
// called if the user either is sure that composition will never return to that
// tuple or doesn't care that if it does, it is assigned a new state ID.
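// Illustrative usage (a sketch): once a caller knows that composition will
// never revisit state s, e.g. because all arcs leaving s have been expanded
// and cached, it may call table.Erase(s) to reclaim the tuple's memory.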
template <typename Arc, typename StateTuple>
class ErasableComposeStateTable
: public ErasableStateTable<StateTuple, ComposeHash<StateTuple>> {
public:
ErasableComposeStateTable(const Fst<Arc> &fst1, const Fst<Arc> &fst2) {}
constexpr bool Error() const { return false; }
private:
ErasableComposeStateTable &operator=(const ErasableComposeStateTable &table) =
delete;
};
} // namespace fst
#endif // FST_STATE_TABLE_H_
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/reverse.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <fst/script/fst-class.h>
#include <fst/script/reverse.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
void Reverse(const FstClass &ifst, MutableFstClass *ofst,
bool require_superinitial) {
if (!internal::ArcTypesMatch(ifst, *ofst, "Reverse")) {
ofst->SetProperties(kError, kError);
return;
}
ReverseArgs args(ifst, ofst, require_superinitial);
Apply<Operation<ReverseArgs>>("Reverse", ifst.ArcType(), &args);
}
REGISTER_FST_OPERATION(Reverse, StdArc, ReverseArgs);
REGISTER_FST_OPERATION(Reverse, LogArc, ReverseArgs);
REGISTER_FST_OPERATION(Reverse, Log64Arc, ReverseArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src | coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/lib/symbol-table-ops.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
#include <fst/symbol-table-ops.h>
#include <string>
namespace fst {
SymbolTable *MergeSymbolTable(const SymbolTable &left, const SymbolTable &right,
bool *right_relabel_output) {
  // MergeSymbolTable detects several special cases. It will return a
  // reference-counted copy of the left or right SymbolTable if either symbol
  // table is a superset of the other.
std::unique_ptr<SymbolTable> merged(
new SymbolTable("merge_" + left.Name() + "_" + right.Name()));
// Copies everything from the left symbol table.
bool left_has_all = true;
bool right_has_all = true;
bool relabel = false;
for (SymbolTableIterator liter(left); !liter.Done(); liter.Next()) {
merged->AddSymbol(liter.Symbol(), liter.Value());
if (right_has_all) {
int64_t key = right.Find(liter.Symbol());
if (key == -1) {
right_has_all = false;
} else if (!relabel && key != liter.Value()) {
relabel = true;
}
}
}
if (right_has_all) {
if (right_relabel_output) *right_relabel_output = relabel;
return right.Copy();
}
  // Adds all the symbols we can from the right symbol table.
std::vector<string> conflicts;
for (SymbolTableIterator riter(right); !riter.Done(); riter.Next()) {
int64_t key = merged->Find(riter.Symbol());
if (key != -1) {
      // The symbol already exists, possibly with a different value.
if (key != riter.Value()) relabel = true;
continue;
}
    // The symbol doesn't exist in the left table.
left_has_all = false;
if (!merged->Find(riter.Value()).empty()) {
      // We can't add the symbol at its preferred value; add it later, in order.
conflicts.push_back(riter.Symbol());
continue;
}
    // There is a hole, so we can add this symbol with its ID.
merged->AddSymbol(riter.Symbol(), riter.Value());
}
if (right_relabel_output) *right_relabel_output = relabel;
if (left_has_all) return left.Copy();
// Add all symbols that conflicted, in order
for (const auto &conflict : conflicts) merged->AddSymbol(conflict);
return merged.release();
}
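// Illustrative usage of MergeSymbolTable (a sketch; `left` and `right` are
// assumed to be existing SymbolTables):
//
//   bool relabel = false;
//   std::unique_ptr<SymbolTable> merged(
//       MergeSymbolTable(left, right, &relabel));
//   // If relabel is true, labels drawn from `right` must be remapped into
//   // `merged` before FSTs using the two tables can be combined.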
SymbolTable *CompactSymbolTable(const SymbolTable &syms) {
std::map<int64_t, string> sorted;
SymbolTableIterator stiter(syms);
for (; !stiter.Done(); stiter.Next()) {
sorted[stiter.Value()] = stiter.Symbol();
}
auto *compact = new SymbolTable(syms.Name() + "_compact");
int64_t newkey = 0;
for (const auto &kv : sorted) compact->AddSymbol(kv.second, newkey++);
return compact;
}
SymbolTable *FstReadSymbols(const string &filename, bool input_symbols) {
std::ifstream in(filename, std::ios_base::in | std::ios_base::binary);
if (!in) {
LOG(ERROR) << "FstReadSymbols: Can't open file " << filename;
return nullptr;
}
FstHeader hdr;
if (!hdr.Read(in, filename)) {
LOG(ERROR) << "FstReadSymbols: Couldn't read header from " << filename;
return nullptr;
}
if (hdr.GetFlags() & FstHeader::HAS_ISYMBOLS) {
std::unique_ptr<SymbolTable> isymbols(SymbolTable::Read(in, filename));
if (isymbols == nullptr) {
LOG(ERROR) << "FstReadSymbols: Couldn't read input symbols from "
<< filename;
return nullptr;
}
if (input_symbols) return isymbols.release();
}
if (hdr.GetFlags() & FstHeader::HAS_OSYMBOLS) {
std::unique_ptr<SymbolTable> osymbols(SymbolTable::Read(in, filename));
if (osymbols == nullptr) {
LOG(ERROR) << "FstReadSymbols: Couldn't read output symbols from "
<< filename;
return nullptr;
}
if (!input_symbols) return osymbols.release();
}
LOG(ERROR) << "FstReadSymbols: The file " << filename
<< " doesn't contain the requested symbols";
return nullptr;
}
bool AddAuxiliarySymbols(const string &prefix, int64_t start_label,
int64_t nlabels, SymbolTable *syms) {
for (int64_t i = 0; i < nlabels; ++i) {
auto index = i + start_label;
if (index != syms->AddSymbol(prefix + std::to_string(i), index)) {
FSTERROR() << "AddAuxiliarySymbols: Symbol table clash";
return false;
}
}
return true;
}
} // namespace fst
| 0 |
coqui_public_repos/stt-model-manager | coqui_public_repos/stt-model-manager/test/server.test.js | const chai = require('chai');
const chaiHttp = require('chai-http');
const should = require('should-http');
chai.use(chaiHttp);
const expect = chai.expect;
const fs = require('fs');
const io = require('socket.io-client');
const url = 'http://localhost:4000';
let audioFile1 = process.env.HOME + '/STT/audio/2830-3980-0043.wav';
let audioFile2 = process.env.HOME + '/STT/audio/8455-210777-0068.wav';
let audioFile3 = process.env.HOME + '/STT/audio/4507-16021-0012.wav';
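// Note: these tests assume an STT websocket server is already listening on
// localhost:4000 and that the sample WAV files above exist under
// $HOME/STT/audio.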
let socket;
before(function(done) {
console.log('before');
socket = io.connect(url, {});
done();
});
describe('GET /', function() {
it('should return web-microphone-websocket', function(done) {
chai.request(url)
.get('/')
.end(function(err, res){
res.should.have.status(200);
expect(res.text).to.be.equal('web-microphone-websocket');
done();
});
});
});
describe('Websocket Audio', function() {
it('audioFile1: experience proof this', function(done) {
socket.once('recognize', (results) => {
expect(results.text).to.be.equal('experience proof this');
done();
});
fs.createReadStream(audioFile1, {highWaterMark: 4096})
.on('data', function (chunk) {
socket.emit('microphone-data', chunk);
})
.on('end', function () {
socket.emit('microphone-end');
});
});
it('audioFile2: your power is sufficient i said', function(done) {
socket.once('recognize', (results) => {
expect(results.text).to.be.equal('your power is sufficient i said');
done();
});
fs.createReadStream(audioFile2, {highWaterMark: 4096})
.on('data', function (chunk) {
socket.emit('microphone-data', chunk);
})
.on('end', function () {
socket.emit('microphone-end');
});
});
it('audioFile3: why should one halt on the way', function(done) {
socket.once('recognize', (results) => {
expect(results.text).to.be.equal('why should one halt on the way');
done();
});
fs.createReadStream(audioFile3, {highWaterMark: 4096})
.on('data', function (chunk) {
socket.emit('microphone-data', chunk);
})
.on('end', function () {
socket.emit('microphone-end');
});
});
});
| 0 |
coqui_public_repos/STT-examples | coqui_public_repos/STT-examples/django_api_streaming/entrypoint.sh | #!/bin/bash
echo "Running command '$*'"
exec su -p ${PYTHON_RUN_USER} -s /bin/bash -c "$*"
| 0 |
coqui_public_repos/TTS/recipes/vctk | coqui_public_repos/TTS/recipes/vctk/yourtts/train_yourtts.py | import os
import torch
from trainer import Trainer, TrainerArgs
from TTS.bin.compute_embeddings import compute_embeddings
from TTS.bin.resample import resample_files
from TTS.config.shared_configs import BaseDatasetConfig
from TTS.tts.configs.vits_config import VitsConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.models.vits import CharactersConfig, Vits, VitsArgs, VitsAudioConfig
from TTS.utils.downloaders import download_vctk
torch.set_num_threads(24)
# pylint: disable=W0105
"""
This recipe replicates the first experiment proposed in the YourTTS paper (https://arxiv.org/abs/2112.02418).
The YourTTS model is based on the VITS model; however, it uses external speaker embeddings extracted from a pre-trained speaker encoder and has small architecture changes.
In addition, YourTTS can be trained on multilingual data; however, this recipe replicates the single-language training using the VCTK dataset.
If you are interested in multilingual training, we have commented out the parameters on the VitsArgs class instance that should be enabled for multilingual training.
In addition, you will need to add the extra datasets, following VCTK as an example.
"""
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
# Name of the run for the Trainer
RUN_NAME = "YourTTS-EN-VCTK"
# Path where you want to save the model's outputs (configs, checkpoints and tensorboard logs)
OUT_PATH = os.path.dirname(os.path.abspath(__file__)) # "/raid/coqui/Checkpoints/original-YourTTS/"
# If you want to do transfer learning and speed up your training, you can set here the path to the original YourTTS model
RESTORE_PATH = None # "/root/.local/share/tts/tts_models--multilingual--multi-dataset--your_tts/model_file.pth"
# This parameter is useful for debugging: it skips the training epochs and just runs the evaluation and produces the test sentences
SKIP_TRAIN_EPOCH = False
# Set here the batch size to be used in training and evaluation
BATCH_SIZE = 32
# Training sampling rate and the target sampling rate for resampling the downloaded dataset (Note: if you change this you might need to redownload the dataset!)
# Note: if you add new datasets, please make sure that the dataset sampling rate and this parameter match; otherwise, resample your audio
SAMPLE_RATE = 16000
# Max audio length in seconds to be used in training (every audio bigger than it will be ignored)
MAX_AUDIO_LEN_IN_SECONDS = 10
### Download VCTK dataset
VCTK_DOWNLOAD_PATH = os.path.join(CURRENT_PATH, "VCTK")
# Define the number of threads used during the audio resampling
NUM_RESAMPLE_THREADS = 10
# Check whether the VCTK dataset is already downloaded; if not, download it
if not os.path.exists(VCTK_DOWNLOAD_PATH):
print(">>> Downloading VCTK dataset:")
download_vctk(VCTK_DOWNLOAD_PATH)
resample_files(VCTK_DOWNLOAD_PATH, SAMPLE_RATE, file_ext="flac", n_jobs=NUM_RESAMPLE_THREADS)
# init configs
vctk_config = BaseDatasetConfig(
formatter="vctk",
dataset_name="vctk",
meta_file_train="",
meta_file_val="",
path=VCTK_DOWNLOAD_PATH,
language="en",
ignored_speakers=[
"p261",
"p225",
"p294",
"p347",
"p238",
"p234",
"p248",
"p335",
"p245",
"p326",
"p302",
    ],  # Ignore the test speakers to fully replicate the paper experiment
)
# Add here the configs of all datasets. In our case we just want to train with the VCTK dataset, so we only add VCTK. Note: if you want to add new datasets, just add them here and the speaker embeddings (d-vectors) will be computed automatically for each new dataset :)
DATASETS_CONFIG_LIST = [vctk_config]
### Extract speaker embeddings
SPEAKER_ENCODER_CHECKPOINT_PATH = (
"https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/model_se.pth.tar"
)
SPEAKER_ENCODER_CONFIG_PATH = "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/config_se.json"
D_VECTOR_FILES = [] # List of speaker embeddings/d-vectors to be used during the training
# Iterate over all the dataset configs, checking whether the speaker embeddings have already been computed; if not, compute them
for dataset_conf in DATASETS_CONFIG_LIST:
    # Check whether the embeddings have already been computed; if not, compute them
embeddings_file = os.path.join(dataset_conf.path, "speakers.pth")
if not os.path.isfile(embeddings_file):
print(f">>> Computing the speaker embeddings for the {dataset_conf.dataset_name} dataset")
compute_embeddings(
SPEAKER_ENCODER_CHECKPOINT_PATH,
SPEAKER_ENCODER_CONFIG_PATH,
embeddings_file,
old_speakers_file=None,
config_dataset_path=None,
formatter_name=dataset_conf.formatter,
dataset_name=dataset_conf.dataset_name,
dataset_path=dataset_conf.path,
meta_file_train=dataset_conf.meta_file_train,
meta_file_val=dataset_conf.meta_file_val,
disable_cuda=False,
no_eval=False,
)
D_VECTOR_FILES.append(embeddings_file)
# Audio config used in training.
audio_config = VitsAudioConfig(
sample_rate=SAMPLE_RATE,
hop_length=256,
win_length=1024,
fft_size=1024,
mel_fmin=0.0,
mel_fmax=None,
num_mels=80,
)
# Init VitsArgs, setting the arguments needed for the YourTTS model
model_args = VitsArgs(
d_vector_file=D_VECTOR_FILES,
use_d_vector_file=True,
d_vector_dim=512,
num_layers_text_encoder=10,
speaker_encoder_model_path=SPEAKER_ENCODER_CHECKPOINT_PATH,
speaker_encoder_config_path=SPEAKER_ENCODER_CONFIG_PATH,
resblock_type_decoder="2", # In the paper, we accidentally trained the YourTTS using ResNet blocks type 2, if you like you can use the ResNet blocks type 1 like the VITS model
# Useful parameters to enable the Speaker Consistency Loss (SCL) described in the paper
# use_speaker_encoder_as_loss=True,
# Useful parameters to enable multilingual training
# use_language_embedding=True,
# embedded_language_dim=4,
)
# General training config; here you can change the batch size and other useful parameters
config = VitsConfig(
output_path=OUT_PATH,
model_args=model_args,
run_name=RUN_NAME,
project_name="YourTTS",
run_description="""
- Original YourTTS trained using VCTK dataset
""",
dashboard_logger="tensorboard",
logger_uri=None,
audio=audio_config,
batch_size=BATCH_SIZE,
batch_group_size=48,
eval_batch_size=BATCH_SIZE,
num_loader_workers=8,
eval_split_max_size=256,
print_step=50,
plot_step=100,
log_model_step=1000,
save_step=5000,
save_n_checkpoints=2,
save_checkpoints=True,
target_loss="loss_1",
print_eval=False,
use_phonemes=False,
phonemizer="espeak",
phoneme_language="en",
compute_input_seq_cache=True,
add_blank=True,
text_cleaner="multilingual_cleaners",
characters=CharactersConfig(
characters_class="TTS.tts.models.vits.VitsCharacters",
pad="_",
eos="&",
bos="*",
blank=None,
characters="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ",
punctuations="!'(),-.:;? ",
phonemes="",
is_unique=True,
is_sorted=True,
),
phoneme_cache_path=None,
precompute_num_workers=12,
start_by_longest=True,
datasets=DATASETS_CONFIG_LIST,
cudnn_benchmark=False,
max_audio_len=SAMPLE_RATE * MAX_AUDIO_LEN_IN_SECONDS,
mixed_precision=False,
test_sentences=[
[
"It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
"VCTK_p277",
None,
"en",
],
[
"Be a voice, not an echo.",
"VCTK_p239",
None,
"en",
],
[
"I'm sorry Dave. I'm afraid I can't do that.",
"VCTK_p258",
None,
"en",
],
[
"This cake is great. It's so delicious and moist.",
"VCTK_p244",
None,
"en",
],
[
"Prior to November 22, 1963.",
"VCTK_p305",
None,
"en",
],
],
# Enable the weighted sampler
use_weighted_sampler=True,
# Ensures that all speakers are seen in the training batch equally no matter how many samples each speaker has
weighted_sampler_attrs={"speaker_name": 1.0},
weighted_sampler_multipliers={},
    # Set the Speaker Consistency Loss (SCL) α to 9, as in the paper
speaker_encoder_loss_alpha=9.0,
)
# Load all the dataset samples and split training and evaluation sets
train_samples, eval_samples = load_tts_samples(
config.datasets,
eval_split=True,
eval_split_max_size=config.eval_split_max_size,
eval_split_size=config.eval_split_size,
)
# Init the model
model = Vits.init_from_config(config)
# Init the trainer and 🚀
trainer = Trainer(
TrainerArgs(restore_path=RESTORE_PATH, skip_train_epoch=SKIP_TRAIN_EPOCH),
config,
output_path=OUT_PATH,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
)
trainer.fit()
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/AUTHORS | Principal Contacts:
Cyril Allauzen <allauzen@google.com>
Michael Riley <riley@google.com>
Contributors:
These contributions range from fundamental algorithmic contributions (e.g.,
Mehryar Mohri) to implementation of core components and extensions.
Tom Bagby
Dan Bikel
Kyle Gorman
Martin Jansche
Boulos Harb
Mehryar Mohri
Dan Povey
Kasturi Raghavan
Jacob Ratkiewicz
Jesse Rosenstock
Johan Schalkwyk
Masha Shugrina
Wojtek Skut
Jeffrey Sorensen
Richard Sproat
Ananda Theertha Suresh
Terry Tai
Ke Wu
| 0 |
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src | coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/script/draw.cc | // See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
#include <ostream>
#include <string>
#include <fst/script/draw.h>
#include <fst/script/fst-class.h>
#include <fst/script/script-impl.h>
namespace fst {
namespace script {
void DrawFst(const FstClass &fst, const SymbolTable *isyms,
const SymbolTable *osyms, const SymbolTable *ssyms, bool accep,
const string &title, float width, float height, bool portrait,
bool vertical, float ranksep, float nodesep, int fontsize,
int precision, const string &float_format, bool show_weight_one,
std::ostream *ostrm, const string &dest) {
FstDrawerArgs args(fst, isyms, osyms, ssyms, accep, title, width, height,
portrait, vertical, ranksep, nodesep, fontsize, precision,
float_format, show_weight_one, ostrm, dest);
Apply<Operation<FstDrawerArgs>>("DrawFst", fst.ArcType(), &args);
}
REGISTER_FST_OPERATION(DrawFst, StdArc, FstDrawerArgs);
REGISTER_FST_OPERATION(DrawFst, LogArc, FstDrawerArgs);
REGISTER_FST_OPERATION(DrawFst, Log64Arc, FstDrawerArgs);
} // namespace script
} // namespace fst
| 0 |
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core | coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/common/parse_string.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <locale>
#include <sstream>
#include <type_traits>
#include "core/common/common.h"
namespace onnxruntime {
/**
* Tries to parse a value from an entire string.
*/
template <typename T>
bool TryParseStringWithClassicLocale(const std::string& str, T& value) {
ORT_IF_CONSTEXPR (std::is_integral<T>::value && std::is_unsigned<T>::value) {
// if T is unsigned integral type, reject negative values which will wrap
if (!str.empty() && str[0] == '-') {
return false;
}
}
// don't allow leading whitespace
if (!str.empty() && std::isspace(str[0], std::locale::classic())) {
return false;
}
std::istringstream is{str};
is.imbue(std::locale::classic());
T parsed_value{};
const bool parse_successful =
is >> parsed_value &&
is.get() == std::istringstream::traits_type::eof(); // don't allow trailing characters
if (!parse_successful) {
return false;
}
value = std::move(parsed_value);
return true;
}
inline bool TryParseStringWithClassicLocale(const std::string& str, std::string& value) {
value = str;
return true;
}
inline bool TryParseStringWithClassicLocale(const std::string& str, bool& value) {
if (str == "0" || str == "False" || str == "false") {
value = false;
return true;
}
if (str == "1" || str == "True" || str == "true") {
value = true;
return true;
}
return false;
}
/**
 * Parses a value from an entire string. Returns a non-OK Status on failure.
*/
template <typename T>
Status ParseStringWithClassicLocale(const std::string& s, T& value) {
  ORT_RETURN_IF_NOT(TryParseStringWithClassicLocale(s, value), "Failed to parse value: \"", s, "\"");
return Status::OK();
}
/**
 * Parses a value from an entire string. Throws if parsing fails.
*/
template <typename T>
T ParseStringWithClassicLocale(const std::string& s) {
T value{};
ORT_THROW_IF_ERROR(ParseStringWithClassicLocale(s, value));
return value;
}
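// Illustrative usage (a sketch):
//
//   int n{};
//   if (TryParseStringWithClassicLocale("42", n)) {
//     // n == 42, parsed with the classic "C" locale regardless of the
//     // global locale.
//   }
//   const bool flag = ParseStringWithClassicLocale<bool>("true");  // true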
} // namespace onnxruntime
| 0 |