Columns: repo_id (string), file_path (string), content (string), __index_level_0__ (int64)
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-python_37_tflite_8k-linux-amd64-prod-opt.yml
build: template_file: test-linux-opt-base.tyml dependencies: - "linux-amd64-tflite-opt" args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python_tflite-tests-prod.sh 3.7.6:m 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 TFLite Python v3.7 prod tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on Python v3.7 on prod model, TFLite, optimized version (8kHz)"
0
coqui_public_repos/STT/native_client/dotnet
coqui_public_repos/STT/native_client/dotnet/STTWPF/MainWindow.xaml
<Window x:Class="STTWPF.MainWindow" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:d="http://schemas.microsoft.com/expression/blend/2008" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" Title="STT client" Width="800" Height="600" Loaded="Window_Loaded" WindowStartupLocation="CenterScreen" mc:Ignorable="d"> <Grid> <Grid.RowDefinitions> <RowDefinition Height="222" /> <RowDefinition /> </Grid.RowDefinitions> <TextBox Grid.Row="1" Margin="10,36,10,10" FontSize="16px" Text="{Binding Transcription, Mode=OneWay}" TextWrapping="Wrap" /> <Label Grid.Row="1" Height="26" Margin="10,5,10,0" VerticalAlignment="Top" Content="Results:" /> <Label Height="26" Margin="10,10,10,0" VerticalAlignment="Top" Content="Select an audio file to transcript:" /> <TextBox Height="23" Margin="10,41,10,0" VerticalAlignment="Top" Text="{Binding AudioFilePath, Mode=TwoWay}" TextWrapping="Wrap" /> <Button Width="80" Height="25" Margin="10,69,0,0" HorizontalAlignment="Left" VerticalAlignment="Top" Command="{Binding SelectFileCommand}" Content="Open file" /> <Button Width="82" Height="25" Margin="95,69,0,0" HorizontalAlignment="Left" VerticalAlignment="Top" Command="{Binding EnableExternalScorerCommand}" Content="Enable external scorer" /> <Button Width="75" Height="25" Margin="182,69,0,0" HorizontalAlignment="Left" VerticalAlignment="Top" Command="{Binding InferenceFromFileCommand}" Content="Transcript" /> <Label Height="30" Margin="10,99,10,0" VerticalAlignment="Top" Content="{Binding StatusMessage, Mode=OneWay}" /> <Label Height="26" Margin="10,158,10,0" VerticalAlignment="Top" Content="Select an audio input:" /> <ComboBox Height="23" Margin="20,189,186,0" VerticalAlignment="Top" DisplayMemberPath="FriendlyName" ItemsSource="{Binding AvailableRecordDevices, Mode=TwoWay}" SelectedIndex="0" SelectedItem="{Binding SelectedDevice, Mode=TwoWay}" /> <Button Width="91" Height="23" Margin="0,0,90,10" HorizontalAlignment="Right" VerticalAlignment="Bottom" Command="{Binding StartRecordingCommand}" Content="Record" IsEnabled="{Binding EnableStartRecord, Mode=OneWay}" /> <Button Width="75" Height="23" Margin="0,0,10,10" HorizontalAlignment="Right" VerticalAlignment="Bottom" Command="{Binding StopRecordingCommand}" Content="Stop" IsEnabled="{Binding EnableStopRecord, Mode=OneWay}" /> </Grid> </Window>
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/info.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <string> #include <fst/script/fst-class.h> #include <fst/script/info.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void PrintFstInfo(const FstClass &fst, bool test_properties, const string &arc_filter, const string &info_type, bool pipe, bool verify) { InfoArgs args(fst, test_properties, arc_filter, info_type, pipe, verify); Apply<Operation<InfoArgs>>("PrintFstInfo", fst.ArcType(), &args); } void GetFstInfo(const FstClass &fst, bool test_properties, const string &arc_filter, const string &info_type, bool verify, FstInfo *result) { GetInfoArgs args(fst, test_properties, arc_filter, info_type, verify, result); Apply<Operation<GetInfoArgs>>("GetFstInfo", fst.ArcType(), &args); } REGISTER_FST_OPERATION(PrintFstInfo, StdArc, InfoArgs); REGISTER_FST_OPERATION(PrintFstInfo, LogArc, InfoArgs); REGISTER_FST_OPERATION(PrintFstInfo, Log64Arc, InfoArgs); REGISTER_FST_OPERATION(GetFstInfo, StdArc, GetInfoArgs); REGISTER_FST_OPERATION(GetFstInfo, LogArc, GetInfoArgs); REGISTER_FST_OPERATION(GetFstInfo, Log64Arc, GetInfoArgs); } // namespace script } // namespace fst
0
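A minimal sketch of driving the scripted info operation registered in info.cc above, similar to what the fstinfo command-line tool does; the file name "a.fst" and the option values ("any" arc filter, "auto" info type) are illustrative assumptions, not taken from this repo.

#include <memory>
#include <fst/script/fst-class.h>
#include <fst/script/info.h>

int main() {
  // Read an FST of any registered arc type; Read() returns nullptr on failure.
  std::unique_ptr<fst::script::FstClass> f(fst::script::FstClass::Read("a.fst"));
  if (!f) return 1;
  // Arguments follow the signature above:
  // (fst, test_properties, arc_filter, info_type, pipe, verify).
  fst::script::PrintFstInfo(*f, true, "any", "auto", false, true);
  return 0;
}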
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/util/integer_to_string.hh
#ifndef UTIL_INTEGER_TO_STRING_H #define UTIL_INTEGER_TO_STRING_H #include <cstddef> #include <stdint.h> namespace util { /* These functions convert integers to strings and return the end pointer. */ char *ToString(uint32_t value, char *to); char *ToString(uint64_t value, char *to); // Implemented as wrappers to above char *ToString(int32_t value, char *to); char *ToString(int64_t value, char *to); // Calls the 32-bit versions for now. char *ToString(uint16_t value, char *to); char *ToString(int16_t value, char *to); char *ToString(const void *value, char *to); inline char *ToString(bool value, char *to) { *to++ = '0' + value; return to; } // How many bytes to reserve in the buffer for these strings: // g++ 4.9.1 doesn't work with this: // static const std::size_t kBytes = 5; // So use enum. template <class T> struct ToStringBuf; template <> struct ToStringBuf<bool> { enum { kBytes = 1 }; }; template <> struct ToStringBuf<uint16_t> { enum { kBytes = 5 }; }; template <> struct ToStringBuf<int16_t> { enum { kBytes = 6 }; }; template <> struct ToStringBuf<uint32_t> { enum { kBytes = 10 }; }; template <> struct ToStringBuf<int32_t> { enum { kBytes = 11 }; }; template <> struct ToStringBuf<uint64_t> { enum { kBytes = 20 }; }; template <> struct ToStringBuf<int64_t> { // Not a typo. 2^63 has 19 digits. enum { kBytes = 20 }; }; template <> struct ToStringBuf<const void*> { // Either 18 on 64-bit or 10 on 32-bit. enum { kBytes = sizeof(const void*) * 2 + 2 }; }; // Maximum over this and float. enum { kToStringMaxBytes = 20 }; } // namespace util #endif // UTIL_INTEGER_TO_STRING_H
0
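A short usage sketch for the header above, assuming only the declarations shown: ToStringBuf<T>::kBytes sizes a stack buffer at compile time, and ToString returns a pointer one past the last character written.

#include <string>
#include "util/integer_to_string.hh"

std::string UInt64ToString(uint64_t value) {
  // kBytes is the worst-case width for this type (20 digits for uint64_t).
  char buf[util::ToStringBuf<uint64_t>::kBytes];
  char *end = util::ToString(value, buf);  // end points one past the last digit
  return std::string(buf, end);
}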
coqui_public_repos/STT-models/hakha-chin/itml
coqui_public_repos/STT-models/hakha-chin/itml/v0.1.1/alphabet.txt
a b c d e f g h i j k l m n o p q r s t u v w y z ṭ
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/script/register.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_REGISTER_H_ #define FST_SCRIPT_REGISTER_H_ #include <istream> #include <string> #include <fst/generic-register.h> #include <fst/script/fst-class.h> #include <fst/script/weight-class.h> // Holds methods and classes responsible for maintaining // the register for FstClass arc types. namespace fst { namespace script { // Registers for reading and converting various kinds of FST classes. // This class definition is to avoid a nested class definition inside the // IORegistration struct. template <class Reader, class Creator, class Converter> struct FstClassRegEntry { Reader reader; Creator creator; Converter converter; FstClassRegEntry(Reader r, Creator cr, Converter co) : reader(r), creator(cr), converter(co) {} FstClassRegEntry() : reader(nullptr), creator(nullptr), converter(nullptr) {} }; template <class Reader, class Creator, class Converter> class FstClassIORegister : public GenericRegister<string, FstClassRegEntry<Reader, Creator, Converter>, FstClassIORegister<Reader, Creator, Converter>> { public: Reader GetReader(const string &arc_type) const { return this->GetEntry(arc_type).reader; } Creator GetCreator(const string &arc_type) const { return this->GetEntry(arc_type).creator; } Converter GetConverter(const string &arc_type) const { return this->GetEntry(arc_type).converter; } protected: string ConvertKeyToSoFilename(const string &key) const final { string legal_type(key); ConvertToLegalCSymbol(&legal_type); return legal_type + "-arc.so"; } }; // Struct containing everything needed to register a particular type // of FST class (e.g., a plain FstClass, or a MutableFstClass, etc.). template <class FstClassType> struct IORegistration { using Reader = FstClassType *(*)(std::istream &stream, const FstReadOptions &opts); using Creator = FstClassImplBase *(*)(); using Converter = FstClassImplBase *(*)(const FstClass &other); using Entry = FstClassRegEntry<Reader, Creator, Converter>; // FST class Register. using Register = FstClassIORegister<Reader, Creator, Converter>; // FST class Register-er. using Registerer = GenericRegisterer<FstClassIORegister<Reader, Creator, Converter>>; }; #define REGISTER_FST_CLASS(Class, Arc) \ static IORegistration<Class>::Registerer Class##_##Arc##_registerer( \ Arc::Type(), \ IORegistration<Class>::Entry(Class::Read<Arc>, Class::Create<Arc>, \ Class::Convert<Arc>)) #define REGISTER_FST_CLASSES(Arc) \ REGISTER_FST_CLASS(FstClass, Arc); \ REGISTER_FST_CLASS(MutableFstClass, Arc); \ REGISTER_FST_CLASS(VectorFstClass, Arc); } // namespace script } // namespace fst #endif // FST_SCRIPT_REGISTER_H_
0
coqui_public_repos/TTS/TTS
coqui_public_repos/TTS/TTS/bin/find_unique_chars.py
"""Find all the unique characters in a dataset""" import argparse from argparse import RawTextHelpFormatter from TTS.config import load_config from TTS.tts.datasets import load_tts_samples def main(): # pylint: disable=bad-option-value parser = argparse.ArgumentParser( description="""Find all the unique characters or phonemes in a dataset.\n\n""" """ Example runs: python TTS/bin/find_unique_chars.py --config_path config.json """, formatter_class=RawTextHelpFormatter, ) parser.add_argument("--config_path", type=str, help="Path to dataset config file.", required=True) args = parser.parse_args() c = load_config(args.config_path) # load all datasets train_items, eval_items = load_tts_samples( c.datasets, eval_split=True, eval_split_max_size=c.eval_split_max_size, eval_split_size=c.eval_split_size ) items = train_items + eval_items texts = "".join(item["text"] for item in items) chars = set(texts) lower_chars = filter(lambda c: c.islower(), chars) chars_force_lower = [c.lower() for c in chars] chars_force_lower = set(chars_force_lower) print(f" > Number of unique characters: {len(chars)}") print(f" > Unique characters: {''.join(sorted(chars))}") print(f" > Unique lower characters: {''.join(sorted(lower_chars))}") print(f" > Unique all forced to lower characters: {''.join(sorted(chars_force_lower))}") if __name__ == "__main__": main()
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/tts_tests/test_tacotron_model.py
import copy import os import unittest import torch from torch import nn, optim from tests import get_tests_input_path from TTS.tts.configs.shared_configs import CapacitronVAEConfig, GSTConfig from TTS.tts.configs.tacotron_config import TacotronConfig from TTS.tts.layers.losses import L1LossMasked from TTS.tts.models.tacotron import Tacotron from TTS.utils.audio import AudioProcessor # pylint: disable=unused-variable torch.manual_seed(1) use_cuda = torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") config_global = TacotronConfig(num_chars=32, num_speakers=5, out_channels=513, decoder_output_dim=80) ap = AudioProcessor(**config_global.audio) WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") def count_parameters(model): r"""Count number of trainable parameters in a network""" return sum(p.numel() for p in model.parameters() if p.requires_grad) class TacotronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): config = config_global.copy() config.use_speaker_embedding = False config.num_speakers = 1 input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8,)).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 30, config.audio["num_mels"]).to(device) linear_spec = torch.rand(8, 30, config.audio["fft_size"] // 2 + 1).to(device) mel_lengths = torch.randint(20, 30, (8,)).long().to(device) mel_lengths[-1] = mel_spec.size(1) stop_targets = torch.zeros(8, 30, 1).float().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()) :, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // config.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron(config).to(device) # FIXME: missing num_speakers parameter to Tacotron ctor model.train() print(" > Num parameters for Tacotron model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=config.lr) for _ in range(5): outputs = model.forward(input_dummy, input_lengths, mel_spec, mel_lengths) optimizer.zero_grad() loss = criterion(outputs["decoder_outputs"], mel_spec, mel_lengths) stop_loss = criterion_st(outputs["stop_tokens"], stop_targets) loss = loss + criterion(outputs["model_outputs"], linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-highway layer since it is used conditionally # if count not in [145, 59]: assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1 class MultiSpeakerTacotronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): config = config_global.copy() config.use_speaker_embedding = True config.num_speakers = 5 input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8,)).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 30, config.audio["num_mels"]).to(device) linear_spec = torch.rand(8, 30, config.audio["fft_size"] // 2 + 1).to(device) mel_lengths = torch.randint(20, 30, (8,)).long().to(device) mel_lengths[-1] = mel_spec.size(1) stop_targets = torch.zeros(8, 30, 1).float().to(device) speaker_ids = torch.randint(0, 5, (8,)).long().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()) :, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // config.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) config.d_vector_dim = 55 model = Tacotron(config).to(device) # FIXME: missing num_speakers parameter to Tacotron ctor model.train() print(" > Num parameters for Tacotron model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=config.lr) for _ in range(5): outputs = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, aux_input={"speaker_ids": speaker_ids} ) optimizer.zero_grad() loss = criterion(outputs["decoder_outputs"], mel_spec, mel_lengths) stop_loss = criterion_st(outputs["stop_tokens"], stop_targets) loss = loss + criterion(outputs["model_outputs"], linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-highway layer since it is used conditionally # if count not in [145, 59]: assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1 class TacotronGSTTrainTest(unittest.TestCase): @staticmethod def test_train_step(): config = config_global.copy() config.use_speaker_embedding = True config.num_speakers = 10 config.use_gst = True config.gst = GSTConfig() # with random gst mel style input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8,)).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 120, config.audio["num_mels"]).to(device) linear_spec = torch.rand(8, 120, config.audio["fft_size"] // 2 + 1).to(device) mel_lengths = torch.randint(20, 120, (8,)).long().to(device) mel_lengths[-1] = 120 stop_targets = torch.zeros(8, 120, 1).float().to(device) speaker_ids = torch.randint(0, 5, (8,)).long().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()) :, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // config.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) config.use_gst = True config.gst = GSTConfig() model = Tacotron(config).to(device) # FIXME: missing num_speakers parameter to Tacotron ctor model.train() # print(model) print(" > Num parameters for Tacotron GST model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=config.lr) for _ in range(10): outputs = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, aux_input={"speaker_ids": speaker_ids} ) optimizer.zero_grad() loss = criterion(outputs["decoder_outputs"], mel_spec, mel_lengths) stop_loss = criterion_st(outputs["stop_tokens"], stop_targets) loss = loss + criterion(outputs["model_outputs"], linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-highway layer since it is used conditionally assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1 # with file gst style mel_spec = ( torch.FloatTensor(ap.melspectrogram(ap.load_wav(WAV_FILE)))[:, :120].unsqueeze(0).transpose(1, 2).to(device) ) mel_spec = mel_spec.repeat(8, 1, 1) input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8,)).long().to(device) input_lengths[-1] = 128 linear_spec = torch.rand(8, mel_spec.size(1), config.audio["fft_size"] // 2 + 1).to(device) mel_lengths = torch.randint(20, mel_spec.size(1), (8,)).long().to(device) mel_lengths[-1] = mel_spec.size(1) stop_targets = torch.zeros(8, mel_spec.size(1), 1).float().to(device) speaker_ids = torch.randint(0, 5, (8,)).long().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()) :, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // config.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron(config).to(device) # FIXME: missing num_speakers parameter to Tacotron ctor model.train() # print(model) print(" > Num parameters for Tacotron GST model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=config.lr) for _ in range(10): outputs = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, aux_input={"speaker_ids": speaker_ids} ) optimizer.zero_grad() loss = criterion(outputs["decoder_outputs"], mel_spec, mel_lengths) stop_loss = criterion_st(outputs["stop_tokens"], stop_targets) loss = loss + criterion(outputs["model_outputs"], linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-highway layer since it is used conditionally assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1 class TacotronCapacitronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): config = TacotronConfig( num_chars=32, num_speakers=10, use_speaker_embedding=True, out_channels=513, decoder_output_dim=80, use_capacitron_vae=True, capacitron_vae=CapacitronVAEConfig(), optimizer="CapacitronOptimizer", optimizer_params={ "RAdam": {"betas": [0.9, 0.998], "weight_decay": 1e-6}, "SGD": {"lr": 1e-5, "momentum": 0.9}, }, ) batch = dict({}) batch["text_input"] = torch.randint(0, 24, (8, 128)).long().to(device) batch["text_lengths"] = torch.randint(100, 129, (8,)).long().to(device) batch["text_lengths"] = torch.sort(batch["text_lengths"], descending=True)[0] batch["text_lengths"][0] = 128 batch["linear_input"] = torch.rand(8, 120, config.audio["fft_size"] // 2 + 1).to(device) batch["mel_input"] = torch.rand(8, 120, config.audio["num_mels"]).to(device) batch["mel_lengths"] = torch.randint(20, 120, (8,)).long().to(device) batch["mel_lengths"] = torch.sort(batch["mel_lengths"], descending=True)[0] batch["mel_lengths"][0] = 120 batch["stop_targets"] = torch.zeros(8, 120, 1).float().to(device) batch["stop_target_lengths"] = torch.randint(0, 120, (8,)).to(device) batch["speaker_ids"] = torch.randint(0, 5, (8,)).long().to(device) batch["d_vectors"] = None for idx in batch["mel_lengths"]: batch["stop_targets"][:, int(idx.item()) :, 0] = 1.0 batch["stop_targets"] = batch["stop_targets"].view( batch["text_input"].shape[0], batch["stop_targets"].size(1) // config.r, -1 ) batch["stop_targets"] = (batch["stop_targets"].sum(2) > 0.0).unsqueeze(2).float().squeeze() model = Tacotron(config).to(device) criterion = model.get_criterion() optimizer = model.get_optimizer() model.train() print(" > Num parameters for Tacotron with Capacitron VAE model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 for _ in range(10): _, loss_dict = model.train_step(batch, criterion) optimizer.zero_grad() loss_dict["capacitron_vae_beta_loss"].backward() optimizer.first_step() loss_dict["loss"].backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-highway layer since it is used conditionally assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1 class SCGSTMultiSpeakerTacotronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): config = config_global.copy() config.use_d_vector_file = True config.use_gst = True config.gst = GSTConfig() input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8,)).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 30, config.audio["num_mels"]).to(device) linear_spec = torch.rand(8, 30, config.audio["fft_size"] // 2 + 1).to(device) mel_lengths = torch.randint(20, 30, (8,)).long().to(device) mel_lengths[-1] = mel_spec.size(1) stop_targets = torch.zeros(8, 30, 1).float().to(device) speaker_embeddings = torch.rand(8, 55).to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()) :, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // config.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) config.d_vector_dim = 55 model = Tacotron(config).to(device) # FIXME: missing num_speakers parameter to Tacotron ctor model.train() print(" > Num parameters for Tacotron model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=config.lr) for _ in range(5): outputs = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, aux_input={"d_vectors": speaker_embeddings} ) optimizer.zero_grad() loss = criterion(outputs["decoder_outputs"], mel_spec, mel_lengths) stop_loss = criterion_st(outputs["stop_tokens"], stop_targets) loss = loss + criterion(outputs["model_outputs"], linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for name_param, param_ref in zip(model.named_parameters(), model_ref.parameters()): # ignore pre-highway layer since it is used conditionally # if count not in [145, 59]: name, param = name_param if name == "gst_layer.encoder.recurrence.weight_hh_l0": continue assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/linux-amd64-gpu-opt.yml
build: template_file: linux-opt-base.tyml dependencies: - "swig-linux-amd64" - "node-gyp-cache" - "pyenv-linux-amd64" - "tf_linux-amd64-gpu-opt" routes: - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.gpu" - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.gpu" - "index.project.deepspeech.deepspeech.native_client.gpu.${event.head.sha}" system_setup: > ${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml tensorflow: ${system.tensorflow.linux_amd64_cuda.url} maxRunTime: 14400 scripts: setup: "taskcluster/tc-true.sh" build: "taskcluster/cuda-build.sh" package: "taskcluster/package.sh" nc_asset_name: "native_client.amd64.cuda.linux.tar.xz" workerType: "${docker.tfBuild}" metadata: name: "DeepSpeech Linux AMD64 CUDA" description: "Building DeepSpeech for Linux/AMD64, CUDA-enabled, optimized version"
0
coqui_public_repos/inference-engine/third_party/cereal/include
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/access.hpp
/*! \file access.hpp \brief Access control and default construction */ /* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CEREAL_ACCESS_HPP_ #define CEREAL_ACCESS_HPP_ #include <type_traits> #include <iostream> #include <cstdint> #include <functional> #include "cereal/macros.hpp" #include "cereal/specialize.hpp" #include "cereal/details/helpers.hpp" namespace cereal { // ###################################################################### //! A class that allows cereal to load smart pointers to types that have no default constructor /*! If your class does not have a default constructor, cereal will not be able to load any smart pointers to it unless you overload LoadAndConstruct for your class, and provide an appropriate load_and_construct method. You can also choose to define a member static function instead of specializing this class. The specialization of LoadAndConstruct must be placed within the cereal namespace: @code{.cpp} struct MyType { MyType( int x ); // note: no default ctor int myX; // Define a serialize or load/save pair as you normally would template <class Archive> void serialize( Archive & ar ) { ar( myX ); } }; // Provide a specialization for LoadAndConstruct for your type namespace cereal { template <> struct LoadAndConstruct<MyType> { // load_and_construct will be passed the archive that you will be loading // from as well as a construct object which you can use as if it were the // constructor for your type. cereal will handle all memory management for you. template <class Archive> static void load_and_construct( Archive & ar, cereal::construct<MyType> & construct ) { int x; ar( x ); construct( x ); } // if you require versioning, simply add a const std::uint32_t as the final parameter, e.g.: // load_and_construct( Archive & ar, cereal::construct<MyType> & construct, std::uint32_t const version ) }; } // end namespace cereal @endcode Please note that just as in using external serialization functions, you cannot get access to non-public members of your class by befriending cereal::access. If you have the ability to modify the class you wish to serialize, it is recommended that you use member serialize functions and a static member load_and_construct function. load_and_construct functions, regardless of whether they are static members of your class or whether you create one in the LoadAndConstruct specialization, have the following signature: @code{.cpp} // generally Archive will be templated, but it can be specific if desired template <class Archive> static void load_and_construct( Archive & ar, cereal::construct<MyType> & construct ); // with an optional last parameter specifying the version: const std::uint32_t version @endcode Versioning behaves the same way as it does for standard serialization functions. @tparam T The type to specialize for @ingroup Access */ template <class T> struct LoadAndConstruct { }; // forward decl for construct //! @cond PRIVATE_NEVERDEFINED namespace memory_detail{ template <class Ar, class T> struct LoadAndConstructLoadWrapper; } namespace boost_variant_detail{ template <class Ar, class T> struct LoadAndConstructLoadWrapper; } //! @endcond //! Used to construct types with no default constructor /*! When serializing a type that has no default constructor, cereal will attempt to call either the class static function load_and_construct or the appropriate template specialization of LoadAndConstruct. cereal will pass that function a reference to the archive as well as a reference to a construct object which should be used to perform the allocation once data has been appropriately loaded. @code{.cpp} struct MyType { // note the lack of default constructor MyType( int xx, int yy ); int x, y; double notInConstructor; template <class Archive> void serialize( Archive & ar ) { ar( x, y ); ar( notInConstructor ); } template <class Archive> static void load_and_construct( Archive & ar, cereal::construct<MyType> & construct ) { int x, y; ar( x, y ); // use construct object to initialize with loaded data construct( x, y ); // access to member variables and functions via -> operator ar( construct->notInConstructor ); // could also do the above section by: double z; ar( z ); construct->notInConstructor = z; } }; @endcode @tparam T The class type being serialized */ template <class T> class construct { public: //! Construct and initialize the type T with the given arguments /*! This will forward all arguments to the underlying type T, calling an appropriate constructor. Calling this function more than once will result in an exception being thrown. @param args The arguments to the constructor for T @throw Exception If called more than once */ template <class ... Args> void operator()( Args && ... args ); // implementation deferred due to reliance on cereal::access //! Get a reference to the initialized underlying object /*! This must be called after the object has been initialized. @return A reference to the initialized object @throw Exception If called before initialization */ T * operator->() { if( !itsValid ) throw Exception("Object must be initialized prior to accessing members"); return itsPtr; } //! Returns a raw pointer to the initialized underlying object /*! This is mainly intended for use with passing an instance of a constructed object to cereal::base_class. It is strongly recommended to avoid using this function in any other circumstance. @return A raw pointer to the initialized type */ T * ptr() { return operator->(); } private: template <class Ar, class TT> friend struct ::cereal::memory_detail::LoadAndConstructLoadWrapper; template <class Ar, class TT> friend struct ::cereal::boost_variant_detail::LoadAndConstructLoadWrapper; construct( T * p ) : itsPtr( p ), itsEnableSharedRestoreFunction( [](){} ), itsValid( false ) {} construct( T * p, std::function<void()> enableSharedFunc ) : // g++4.7 ice with default lambda to std func itsPtr( p ), itsEnableSharedRestoreFunction( enableSharedFunc ), itsValid( false ) {} construct( construct const & ) = delete; construct & operator=( construct const & ) = delete; T * itsPtr; std::function<void()> itsEnableSharedRestoreFunction; bool itsValid; }; // ###################################################################### //! A class that can be made a friend to give cereal access to non public functions /*! If you desire non-public serialization functions within a class, cereal can only access these if you declare cereal::access a friend. @code{.cpp} class MyClass { private: friend class cereal::access; // gives access to the private serialize template <class Archive> void serialize( Archive & ar ) { // some code } }; @endcode @ingroup Access */ class access { public: // ####### Standard Serialization ######################################## template<class Archive, class T> inline static auto member_serialize(Archive & ar, T & t) -> decltype(t.CEREAL_SERIALIZE_FUNCTION_NAME(ar)) { return t.CEREAL_SERIALIZE_FUNCTION_NAME(ar); } template<class Archive, class T> inline static auto member_save(Archive & ar, T const & t) -> decltype(t.CEREAL_SAVE_FUNCTION_NAME(ar)) { return t.CEREAL_SAVE_FUNCTION_NAME(ar); } template<class Archive, class T> inline static auto member_save_non_const(Archive & ar, T & t) -> decltype(t.CEREAL_SAVE_FUNCTION_NAME(ar)) { return t.CEREAL_SAVE_FUNCTION_NAME(ar); } template<class Archive, class T> inline static auto member_load(Archive & ar, T & t) -> decltype(t.CEREAL_LOAD_FUNCTION_NAME(ar)) { return t.CEREAL_LOAD_FUNCTION_NAME(ar); } template<class Archive, class T> inline static auto member_save_minimal(Archive const & ar, T const & t) -> decltype(t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar)) { return t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar); } template<class Archive, class T> inline static auto member_save_minimal_non_const(Archive const & ar, T & t) -> decltype(t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar)) { return t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar); } template<class Archive, class T, class U> inline static auto member_load_minimal(Archive const & ar, T & t, U && u) -> decltype(t.CEREAL_LOAD_MINIMAL_FUNCTION_NAME(ar, std::forward<U>(u))) { return t.CEREAL_LOAD_MINIMAL_FUNCTION_NAME(ar, std::forward<U>(u)); } // ####### Versioned Serialization ####################################### template<class Archive, class T> inline static auto member_serialize(Archive & ar, T & t, const std::uint32_t version ) -> decltype(t.CEREAL_SERIALIZE_FUNCTION_NAME(ar, version)) { return t.CEREAL_SERIALIZE_FUNCTION_NAME(ar, version); } template<class Archive, class T> inline static auto member_save(Archive & ar, T const & t, const std::uint32_t version ) -> decltype(t.CEREAL_SAVE_FUNCTION_NAME(ar, version)) { return t.CEREAL_SAVE_FUNCTION_NAME(ar, version); } template<class Archive, class T> inline static auto member_save_non_const(Archive & ar, T & t, const std::uint32_t version ) -> decltype(t.CEREAL_SAVE_FUNCTION_NAME(ar, version)) { return t.CEREAL_SAVE_FUNCTION_NAME(ar, version); } template<class Archive, class T> inline static auto member_load(Archive & ar, T & t, const std::uint32_t version ) -> decltype(t.CEREAL_LOAD_FUNCTION_NAME(ar, version)) { return t.CEREAL_LOAD_FUNCTION_NAME(ar, version); } template<class Archive, class T> inline static auto member_save_minimal(Archive const & ar, T const & t, const std::uint32_t version) -> decltype(t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar, version)) { return t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar, version); } template<class Archive, class T> inline static auto member_save_minimal_non_const(Archive const & ar, T & t, const std::uint32_t version) -> decltype(t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar, version)) { return t.CEREAL_SAVE_MINIMAL_FUNCTION_NAME(ar, version); } template<class Archive, class T, class U> inline static auto member_load_minimal(Archive const & ar, T & t, U && u, const std::uint32_t version) -> decltype(t.CEREAL_LOAD_MINIMAL_FUNCTION_NAME(ar, std::forward<U>(u), version)) { return t.CEREAL_LOAD_MINIMAL_FUNCTION_NAME(ar, std::forward<U>(u), version); } // ####### Other Functionality ########################################## // for detecting inheritance from enable_shared_from_this template <class T> inline static auto shared_from_this(T & t) -> decltype(t.shared_from_this()); // for placement new template <class T, class ... Args> inline static void construct( T *& ptr, Args && ... args ) { new (ptr) T( std::forward<Args>( args )... ); } // for non-placement new with a default constructor template <class T> inline static T * construct() { return new T(); } template <class T> inline static std::false_type load_and_construct(...) { return std::false_type(); } template<class T, class Archive> inline static auto load_and_construct(Archive & ar, ::cereal::construct<T> & construct) -> decltype(T::load_and_construct(ar, construct)) { T::load_and_construct( ar, construct ); } template<class T, class Archive> inline static auto load_and_construct(Archive & ar, ::cereal::construct<T> & construct, const std::uint32_t version) -> decltype(T::load_and_construct(ar, construct, version)) { T::load_and_construct( ar, construct, version ); } }; // end class access // ###################################################################### // Deferred Implementation, see construct for more information template <class T> template <class ... Args> inline void construct<T>::operator()( Args && ... args ) { if( itsValid ) throw Exception("Attempting to construct an already initialized object"); ::cereal::access::construct( itsPtr, std::forward<Args>( args )... ); itsEnableSharedRestoreFunction(); itsValid = true; } } // namespace cereal #endif // CEREAL_ACCESS_HPP_
0
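A compact sketch combining the two facilities documented in access.hpp above: befriending cereal::access to expose a private serialize(), and a static load_and_construct() for a type without a default constructor. The Point type, its fields, and the choice of the binary archive are illustrative assumptions.

#include <memory>
#include "cereal/cereal.hpp"
#include "cereal/archives/binary.hpp"
#include "cereal/types/memory.hpp"

class Point {
  int x_, y_;
  friend class cereal::access;  // lets cereal call the private serialize()
  template <class Archive>
  void serialize(Archive &ar) { ar(x_, y_); }

 public:
  Point(int x, int y) : x_(x), y_(y) {}  // note: no default constructor
  // Called by cereal when loading a smart pointer to Point.
  template <class Archive>
  static void load_and_construct(Archive &ar, cereal::construct<Point> &construct) {
    int x, y;
    ar(x, y);
    construct(x, y);  // forwards to Point(int, int)
  }
};

With this in place, loading a std::unique_ptr<Point> from an archive routes allocation through load_and_construct, as the LoadAndConstruct documentation above describes for the non-member variant.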
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/equivalent.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions and classes to determine the equivalence of two FSTs. #ifndef FST_EQUIVALENT_H_ #define FST_EQUIVALENT_H_ #include <algorithm> #include <deque> #include <unordered_map> #include <utility> #include <vector> #include <fst/log.h> #include <fst/encode.h> #include <fst/push.h> #include <fst/union-find.h> #include <fst/vector-fst.h> namespace fst { namespace internal { // Traits-like struct holding utility functions/typedefs/constants for // the equivalence algorithm. // // Encoding device: in order to make the statesets of the two acceptors // disjoint, we map Arc::StateId on the type MappedId. The states of // the first acceptor are mapped on odd numbers (s -> 2s + 1), and // those of the second one on even numbers (s -> 2s + 2). The number 0 // is reserved for an implicit (non-final) dead state (required for // the correct treatment of non-coaccessible states; kNoStateId is mapped to // kDeadState for both acceptors). The union-find algorithm operates on the // mapped IDs. template <class Arc> struct EquivalenceUtil { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using MappedId = StateId; // ID for an equivalence class. // MappedId for an implicit dead state. static constexpr MappedId kDeadState = 0; // MappedId for lookup failure. static constexpr MappedId kInvalidId = -1; // Maps state ID to the representative of the corresponding // equivalence class. The parameter 'which_fst' takes the values 1 // and 2, identifying the input FST. static MappedId MapState(StateId s, int32 which_fst) { return (kNoStateId == s) ? kDeadState : (static_cast<MappedId>(s) << 1) + which_fst; } // Maps set ID to State ID. static StateId UnMapState(MappedId id) { return static_cast<StateId>((--id) >> 1); } // Convenience function: checks if state with MappedId s is final in // acceptor fa. static bool IsFinal(const Fst<Arc> &fa, MappedId s) { return (kDeadState == s) ? false : (fa.Final(UnMapState(s)) != Weight::Zero()); } // Convenience function: returns the representative of ID in sets, // creating a new set if needed. static MappedId FindSet(UnionFind<MappedId> *sets, MappedId id) { const auto repr = sets->FindSet(id); if (repr != kInvalidId) { return repr; } else { sets->MakeSet(id); return id; } } }; template <class Arc> constexpr typename EquivalenceUtil<Arc>::MappedId EquivalenceUtil<Arc>::kDeadState; template <class Arc> constexpr typename EquivalenceUtil<Arc>::MappedId EquivalenceUtil<Arc>::kInvalidId; } // namespace internal // Equivalence checking algorithm: determines if the two FSTs fst1 and fst2 // are equivalent. The input FSTs must be deterministic input-side epsilon-free // acceptors, unweighted or with weights over a left semiring. Two acceptors are // considered equivalent if they accept exactly the same set of strings (with // the same weights). // // The algorithm (cf. Aho, Hopcroft and Ullman, "The Design and Analysis of // Computer Algorithms") successively constructs sets of states that can be // reached by the same prefixes, starting with a set containing the start states // of both acceptors. A disjoint tree forest (the union-find algorithm) is used // to represent the sets of states. The algorithm returns false if one of the // constructed sets contains both final and non-final states. Returns an // optional error value (useful when FLAGS_error_fatal = false). // // Complexity: // // Quasi-linear, i.e., O(n G(n)), where // // n = |S1| + |S2| is the number of states in both acceptors // // G(n) is a very slowly growing function that can be approximated // by 4 for all practical purposes. template <class Arc> bool Equivalent(const Fst<Arc> &fst1, const Fst<Arc> &fst2, float delta = kDelta, bool *error = nullptr) { using Weight = typename Arc::Weight; if (error) *error = false; // Check that the symbol tables are compatible. if (!CompatSymbols(fst1.InputSymbols(), fst2.InputSymbols()) || !CompatSymbols(fst1.OutputSymbols(), fst2.OutputSymbols())) { FSTERROR() << "Equivalent: Input/output symbol tables of 1st argument " << "do not match input/output symbol tables of 2nd argument"; if (error) *error = true; return false; } // Check properties first. static constexpr auto props = kNoEpsilons | kIDeterministic | kAcceptor; if (fst1.Properties(props, true) != props) { FSTERROR() << "Equivalent: 1st argument not an" << " epsilon-free deterministic acceptor"; if (error) *error = true; return false; } if (fst2.Properties(props, true) != props) { FSTERROR() << "Equivalent: 2nd argument not an" << " epsilon-free deterministic acceptor"; if (error) *error = true; return false; } if ((fst1.Properties(kUnweighted, true) != kUnweighted) || (fst2.Properties(kUnweighted, true) != kUnweighted)) { VectorFst<Arc> efst1(fst1); VectorFst<Arc> efst2(fst2); Push(&efst1, REWEIGHT_TO_INITIAL, delta); Push(&efst2, REWEIGHT_TO_INITIAL, delta); ArcMap(&efst1, QuantizeMapper<Arc>(delta)); ArcMap(&efst2, QuantizeMapper<Arc>(delta)); EncodeMapper<Arc> mapper(kEncodeWeights | kEncodeLabels, ENCODE); ArcMap(&efst1, &mapper); ArcMap(&efst2, &mapper); return Equivalent(efst1, efst2); } using Util = internal::EquivalenceUtil<Arc>; using MappedId = typename Util::MappedId; enum { FST1 = 1, FST2 = 2 }; // Required by Util::MapState(...) auto s1 = Util::MapState(fst1.Start(), FST1); auto s2 = Util::MapState(fst2.Start(), FST2); // The union-find structure. UnionFind<MappedId> eq_classes(1000, Util::kInvalidId); // Initializes the union-find structure. eq_classes.MakeSet(s1); eq_classes.MakeSet(s2); // Data structure for the (partial) acceptor transition function of fst1 and // fst2: input labels mapped to pairs of MappedIds representing destination // states of the corresponding arcs in fst1 and fst2, respectively. using Label2StatePairMap = std::unordered_map<typename Arc::Label, std::pair<MappedId, MappedId>>; Label2StatePairMap arc_pairs; // Pairs of MappedId's to be processed, organized in a queue. std::deque<std::pair<MappedId, MappedId>> q; bool ret = true; // Returns early if the start states differ w.r.t. finality. if (Util::IsFinal(fst1, s1) != Util::IsFinal(fst2, s2)) ret = false; // Main loop: explores the two acceptors in a breadth-first manner, updating // the equivalence relation on the statesets. Loop invariant: each block of // the states contains either final states only or non-final states only. for (q.push_back(std::make_pair(s1, s2)); ret && !q.empty(); q.pop_front()) { s1 = q.front().first; s2 = q.front().second; // Representatives of the equivalence classes of s1/s2. const auto rep1 = Util::FindSet(&eq_classes, s1); const auto rep2 = Util::FindSet(&eq_classes, s2); if (rep1 != rep2) { eq_classes.Union(rep1, rep2); arc_pairs.clear(); // Copies outgoing arcs starting at s1 into the hash table. if (Util::kDeadState != s1) { ArcIterator<Fst<Arc>> arc_iter(fst1, Util::UnMapState(s1)); for (; !arc_iter.Done(); arc_iter.Next()) { const auto &arc = arc_iter.Value(); // Zero-weight arcs are treated as if they did not exist. if (arc.weight != Weight::Zero()) { arc_pairs[arc.ilabel].first = Util::MapState(arc.nextstate, FST1); } } } // Copies outgoing arcs starting at s2 into the hash table. if (Util::kDeadState != s2) { ArcIterator<Fst<Arc>> arc_iter(fst2, Util::UnMapState(s2)); for (; !arc_iter.Done(); arc_iter.Next()) { const auto &arc = arc_iter.Value(); // Zero-weight arcs are treated as if they did not exist. if (arc.weight != Weight::Zero()) { arc_pairs[arc.ilabel].second = Util::MapState(arc.nextstate, FST2); } } } // Iterates through the hash table and processes pairs of target states. for (const auto &arc_iter : arc_pairs) { const auto &pair = arc_iter.second; if (Util::IsFinal(fst1, pair.first) != Util::IsFinal(fst2, pair.second)) { // Detected inconsistency: return false. ret = false; break; } q.push_back(pair); } } } if (fst1.Properties(kError, false) || fst2.Properties(kError, false)) { if (error) *error = true; return false; } return ret; } } // namespace fst #endif // FST_EQUIVALENT_H_
0
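A minimal sketch of calling Equivalent() under the preconditions stated above (deterministic, epsilon-free acceptors); the two identical single-arc machines built here are illustrative.

#include <initializer_list>
#include <fst/equivalent.h>
#include <fst/vector-fst.h>

bool BothAcceptLabelOne() {
  fst::StdVectorFst a, b;
  for (fst::StdVectorFst *f : {&a, &b}) {
    const auto s0 = f->AddState();
    const auto s1 = f->AddState();
    f->SetStart(s0);
    f->SetFinal(s1, fst::TropicalWeight::One());
    // One arc accepting label 1; both machines are built identically.
    f->AddArc(s0, fst::StdArc(1, 1, fst::TropicalWeight::One(), s1));
  }
  bool error = false;
  // Expected result: true, with error left false.
  return fst::Equivalent(a, b, fst::kDelta, &error) && !error;
}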
coqui_public_repos/TTS/TTS
coqui_public_repos/TTS/TTS/bin/train_encoder.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import time import traceback import torch from torch.utils.data import DataLoader from trainer.io import copy_model_files, save_best_model, save_checkpoint from trainer.torch import NoamLR from trainer.trainer_utils import get_optimizer from TTS.encoder.dataset import EncoderDataset from TTS.encoder.utils.generic_utils import setup_encoder_model from TTS.encoder.utils.training import init_training from TTS.encoder.utils.visual import plot_embeddings from TTS.tts.datasets import load_tts_samples from TTS.utils.audio import AudioProcessor from TTS.utils.generic_utils import count_parameters, remove_experiment_folder from TTS.utils.samplers import PerfectBatchSampler from TTS.utils.training import check_update torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True torch.manual_seed(54321) use_cuda = torch.cuda.is_available() num_gpus = torch.cuda.device_count() print(" > Using CUDA: ", use_cuda) print(" > Number of GPUs: ", num_gpus) def setup_loader(ap: AudioProcessor, is_val: bool = False, verbose: bool = False): num_utter_per_class = c.num_utter_per_class if not is_val else c.eval_num_utter_per_class num_classes_in_batch = c.num_classes_in_batch if not is_val else c.eval_num_classes_in_batch dataset = EncoderDataset( c, ap, meta_data_eval if is_val else meta_data_train, voice_len=c.voice_len, num_utter_per_class=num_utter_per_class, num_classes_in_batch=num_classes_in_batch, verbose=verbose, augmentation_config=c.audio_augmentation if not is_val else None, use_torch_spec=c.model_params.get("use_torch_spec", False), ) # get classes list classes = dataset.get_class_list() sampler = PerfectBatchSampler( dataset.items, classes, batch_size=num_classes_in_batch * num_utter_per_class, # total batch size num_classes_in_batch=num_classes_in_batch, num_gpus=1, shuffle=not is_val, drop_last=True, ) if len(classes) < num_classes_in_batch: if is_val: raise RuntimeError( f"config.eval_num_classes_in_batch ({num_classes_in_batch}) needs to be <= {len(classes)} (total number of classes in the eval dataset)!" ) raise RuntimeError( f"config.num_classes_in_batch ({num_classes_in_batch}) needs to be <= {len(classes)} (total number of classes in the train dataset)!" ) # set the classes to avoid getting a wrong class_id when the numbers of training and eval classes are not equal if is_val: dataset.set_classes(train_classes) loader = DataLoader( dataset, num_workers=c.num_loader_workers, batch_sampler=sampler, collate_fn=dataset.collate_fn, ) return loader, classes, dataset.get_map_classid_to_classname() def evaluation(model, criterion, data_loader, global_step): eval_loss = 0 for _, data in enumerate(data_loader): with torch.no_grad(): # setup input data inputs, labels = data # group samples of each class in the batch. perfect sampler produces [3,2,1,3,2,1] we need [3,3,2,2,1,1] labels = torch.transpose( labels.view(c.eval_num_utter_per_class, c.eval_num_classes_in_batch), 0, 1 ).reshape(labels.shape) inputs = torch.transpose( inputs.view(c.eval_num_utter_per_class, c.eval_num_classes_in_batch, -1), 0, 1 ).reshape(inputs.shape) # dispatch data to GPU if use_cuda: inputs = inputs.cuda(non_blocking=True) labels = labels.cuda(non_blocking=True) # forward pass model outputs = model(inputs) # loss computation loss = criterion( outputs.view(c.eval_num_classes_in_batch, outputs.shape[0] // c.eval_num_classes_in_batch, -1), labels ) eval_loss += loss.item() eval_avg_loss = eval_loss / len(data_loader) # save stats dashboard_logger.eval_stats(global_step, {"loss": eval_avg_loss}) # plot the last batch in the evaluation figures = { "UMAP Plot": plot_embeddings(outputs.detach().cpu().numpy(), c.num_classes_in_batch), } dashboard_logger.eval_figures(global_step, figures) return eval_avg_loss def train(model, optimizer, scheduler, criterion, data_loader, eval_data_loader, global_step): model.train() best_loss = {"train_loss": None, "eval_loss": float("inf")} avg_loader_time = 0 end_time = time.time() for epoch in range(c.epochs): tot_loss = 0 epoch_time = 0 for _, data in enumerate(data_loader): start_time = time.time() # setup input data inputs, labels = data # group samples of each class in the batch. perfect sampler produces [3,2,1,3,2,1] we need [3,3,2,2,1,1] labels = torch.transpose(labels.view(c.num_utter_per_class, c.num_classes_in_batch), 0, 1).reshape( labels.shape ) inputs = torch.transpose(inputs.view(c.num_utter_per_class, c.num_classes_in_batch, -1), 0, 1).reshape( inputs.shape ) # ToDo: move it to a unit test # labels_converted = torch.transpose(labels.view(c.num_utter_per_class, c.num_classes_in_batch), 0, 1).reshape(labels.shape) # inputs_converted = torch.transpose(inputs.view(c.num_utter_per_class, c.num_classes_in_batch, -1), 0, 1).reshape(inputs.shape) # idx = 0 # for j in range(0, c.num_classes_in_batch, 1): # for i in range(j, len(labels), c.num_classes_in_batch): # if not torch.all(labels[i].eq(labels_converted[idx])) or not torch.all(inputs[i].eq(inputs_converted[idx])): # print("Invalid") # print(labels) # exit() # idx += 1 # labels = labels_converted # inputs = inputs_converted loader_time = time.time() - end_time global_step += 1 # setup lr if c.lr_decay: scheduler.step() optimizer.zero_grad() # dispatch data to GPU if use_cuda: inputs = inputs.cuda(non_blocking=True) labels = labels.cuda(non_blocking=True) # forward pass model outputs = model(inputs) # loss computation loss = criterion( outputs.view(c.num_classes_in_batch, outputs.shape[0] // c.num_classes_in_batch, -1), labels ) loss.backward() grad_norm, _ = check_update(model, c.grad_clip) optimizer.step() step_time = time.time() - start_time epoch_time += step_time # accumulate the total epoch loss tot_loss += loss.item() # Averaged Loader Time num_loader_workers = c.num_loader_workers if c.num_loader_workers > 0 else 1 avg_loader_time = ( 1 / num_loader_workers * loader_time + (num_loader_workers - 1) / num_loader_workers * avg_loader_time if avg_loader_time != 0 else loader_time ) current_lr = optimizer.param_groups[0]["lr"] if global_step % c.steps_plot_stats == 0: # Plot Training Epoch Stats train_stats = { "loss": loss.item(), "lr": current_lr, "grad_norm": grad_norm, "step_time": step_time, "avg_loader_time": avg_loader_time, } dashboard_logger.train_epoch_stats(global_step, train_stats) figures = { "UMAP Plot": plot_embeddings(outputs.detach().cpu().numpy(), c.num_classes_in_batch), } dashboard_logger.train_figures(global_step, figures) if global_step % c.print_step == 0: print( " | > Step:{} Loss:{:.5f} GradNorm:{:.5f} " "StepTime:{:.2f} LoaderTime:{:.2f} AvgLoaderTime:{:.2f} LR:{:.6f}".format( global_step, loss.item(), grad_norm, step_time, loader_time, avg_loader_time, current_lr ), flush=True, ) if global_step % c.save_step == 0: # save model save_checkpoint( c, model, optimizer, None, global_step, epoch, OUT_PATH, criterion=criterion.state_dict() ) end_time = time.time() print("") print( ">>> Epoch:{} AvgLoss: {:.5f} GradNorm:{:.5f} " "EpochTime:{:.2f} AvgLoaderTime:{:.2f} ".format( epoch, tot_loss / len(data_loader), grad_norm, epoch_time, avg_loader_time ), flush=True, ) # evaluation if c.run_eval: model.eval() eval_loss = evaluation(model, criterion, eval_data_loader, global_step) print("\n\n") print("--> EVAL PERFORMANCE") print( " | > Epoch:{} AvgLoss: {:.5f} ".format(epoch, eval_loss), flush=True, ) # save the best checkpoint best_loss = save_best_model( {"train_loss": None, "eval_loss": eval_loss}, best_loss, c, model, optimizer, None, global_step, epoch, OUT_PATH, criterion=criterion.state_dict(), ) model.train() return best_loss, global_step def main(args): # pylint: disable=redefined-outer-name # pylint: disable=global-variable-undefined global meta_data_train global meta_data_eval global train_classes ap = AudioProcessor(**c.audio) model = setup_encoder_model(c) optimizer = get_optimizer(c.optimizer, c.optimizer_params, c.lr, model) # pylint: disable=redefined-outer-name meta_data_train, meta_data_eval = load_tts_samples(c.datasets, eval_split=True) train_data_loader, train_classes, map_classid_to_classname = setup_loader(ap, is_val=False, verbose=True) if c.run_eval: eval_data_loader, _, _ = setup_loader(ap, is_val=True, verbose=True) else: eval_data_loader = None num_classes = len(train_classes) criterion = model.get_criterion(c, num_classes) if c.loss == "softmaxproto" and c.model != "speaker_encoder": c.map_classid_to_classname = map_classid_to_classname copy_model_files(c, OUT_PATH, new_fields={}) if args.restore_path: criterion, args.restore_step = model.load_checkpoint( c, args.restore_path, eval=False, use_cuda=use_cuda, criterion=criterion ) print(" > Model restored from step %d" % args.restore_step, flush=True) else: args.restore_step = 0 if c.lr_decay: scheduler = NoamLR(optimizer, warmup_steps=c.warmup_steps, last_epoch=args.restore_step - 1) else: scheduler = None num_params = count_parameters(model) print("\n > Model has {} parameters".format(num_params), flush=True) if use_cuda: model = model.cuda() criterion.cuda() global_step = args.restore_step _, global_step = train(model, optimizer, scheduler, criterion, train_data_loader, eval_data_loader, global_step) if __name__ == "__main__": args, c, OUT_PATH, AUDIO_PATH, c_logger, dashboard_logger = init_training() try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: disable=protected-access except Exception: # pylint: disable=broad-except remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/map.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Compatibility file for old-style Map() functions and MapFst class that have // been renamed to ArcMap (cf. StateMap). #ifndef FST_MAP_H_ #define FST_MAP_H_ #include <fst/arc-map.h> namespace fst { template <class A, class C> void Map(MutableFst<A> *fst, C *mapper) { ArcMap(fst, mapper); } template <class A, class C> void Map(MutableFst<A> *fst, C mapper) { ArcMap(fst, mapper); } template <class A, class B, class C> void Map(const Fst<A> &ifst, MutableFst<B> *ofst, C *mapper) { ArcMap(ifst, ofst, mapper); } template <class A, class B, class C> void Map(const Fst<A> &ifst, MutableFst<B> *ofst, C mapper) { ArcMap(ifst, ofst, mapper); } using MapFstOptions = ArcMapFstOptions; template <class A, class B, class C> class MapFst : public ArcMapFst<A, B, C> { public: using FromArc = A; using ToArc = B; using StateId = typename ToArc::StateId; using Weight = typename ToArc::Weight; using State = CacheState<B>; MapFst(const Fst<A> &fst, const C &mapper, const MapFstOptions &opts) : ArcMapFst<A, B, C>(fst, mapper, opts) {} MapFst(const Fst<A> &fst, C *mapper, const MapFstOptions &opts) : ArcMapFst<A, B, C>(fst, mapper, opts) {} MapFst(const Fst<A> &fst, const C &mapper) : ArcMapFst<A, B, C>(fst, mapper) {} MapFst(const Fst<A> &fst, C *mapper) : ArcMapFst<A, B, C>(fst, mapper) {} // See Fst<>::Copy() for doc. MapFst(const MapFst<A, B, C> &fst, bool safe = false) : ArcMapFst<A, B, C>(fst, safe) {} // Get a copy of this MapFst. See Fst<>::Copy() for further doc. MapFst<A, B, C> *Copy(bool safe = false) const override { return new MapFst(*this, safe); } }; // Specialization for MapFst. template <class A, class B, class C> class StateIterator<MapFst<A, B, C>> : public StateIterator<ArcMapFst<A, B, C>> { public: explicit StateIterator(const ArcMapFst<A, B, C> &fst) : StateIterator<ArcMapFst<A, B, C>>(fst) {} }; // Specialization for MapFst. template <class A, class B, class C> class ArcIterator<MapFst<A, B, C>> : public ArcIterator<ArcMapFst<A, B, C>> { public: ArcIterator(const ArcMapFst<A, B, C> &fst, typename A::StateId s) : ArcIterator<ArcMapFst<A, B, C>>(fst, s) {} }; // For backwards compatibility only; use IdentityArcMapper otherwise. template <class A> struct IdentityMapper { using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return arc; } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props; } }; } // namespace fst #endif // FST_MAP_H_
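Since this header only forwards to the arc-map API, a short sketch may help: the legacy Map() spelling and the current ArcMap() spelling are interchangeable. This is an illustrative example, not part of the header; it assumes the standard PlusMapper shipped in <fst/arc-map.h>.

#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst f;
  const auto s0 = f.AddState();
  const auto s1 = f.AddState();
  f.SetStart(s0);
  f.AddArc(s0, fst::StdArc(1, 1, 0.5, s1));
  f.SetFinal(s1, 0.25);
  // Deprecated spelling, kept working by this compatibility header.
  fst::Map(&f, fst::PlusMapper<fst::StdArc>(1.0));
  // Preferred modern spelling; identical behavior.
  fst::ArcMap(&f, fst::PlusMapper<fst::StdArc>(1.0));
  return 0;
}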
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/archives/json.hpp
/*! \file json.hpp \brief JSON input and output archives */ /* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CEREAL_ARCHIVES_JSON_HPP_ #define CEREAL_ARCHIVES_JSON_HPP_ #include "cereal/cereal.hpp" #include "cereal/details/util.hpp" namespace cereal { //! An exception thrown when rapidjson fails an internal assertion /*! @ingroup Utility */ struct RapidJSONException : Exception { RapidJSONException( const char * what_ ) : Exception( what_ ) {} }; } // Inform rapidjson that assert will throw #ifndef CEREAL_RAPIDJSON_ASSERT_THROWS #define CEREAL_RAPIDJSON_ASSERT_THROWS #endif // CEREAL_RAPIDJSON_ASSERT_THROWS // Override rapidjson assertions to throw exceptions by default #ifndef CEREAL_RAPIDJSON_ASSERT #define CEREAL_RAPIDJSON_ASSERT(x) if(!(x)){ \ throw ::cereal::RapidJSONException("rapidjson internal assertion failure: " #x); } #endif // CEREAL_RAPIDJSON_ASSERT // Enable support for writing of nan, inf, -inf #ifndef CEREAL_RAPIDJSON_WRITE_DEFAULT_FLAGS #define CEREAL_RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNanAndInfFlag #endif // Enable support for parsing of nan, inf, -inf #ifndef CEREAL_RAPIDJSON_PARSE_DEFAULT_FLAGS #define CEREAL_RAPIDJSON_PARSE_DEFAULT_FLAGS kParseFullPrecisionFlag | kParseNanAndInfFlag #endif #include "cereal/external/rapidjson/prettywriter.h" #include "cereal/external/rapidjson/ostreamwrapper.h" #include "cereal/external/rapidjson/istreamwrapper.h" #include "cereal/external/rapidjson/document.h" #include "cereal/external/base64.hpp" #include <limits> #include <sstream> #include <stack> #include <vector> #include <string> namespace cereal { // ###################################################################### //! An output archive designed to save data to JSON /*! This archive uses RapidJSON to serialize data to JSON. JSON archives provide human readable output but at decreased performance (both in time and space) compared to binary archives. JSON archives are only guaranteed to finish flushing their contents upon destruction and should thus be used in an RAII fashion.
JSON benefits greatly from name-value pairs which, if present, will name the nodes in the output. If these are not present, each level of the output will be given an automatically generated delimited name. The precision of the output archive controls the number of decimals output for floating point numbers and should be sufficiently large (i.e. at least 20) if there is a desire to have binary equality between the numbers output and those read in. In general you should expect a loss of precision when going from floating point to text and back. JSON archives do not output the size information for any dynamically sized structure and instead infer it from the number of children for a node. This means that data can be hand edited for dynamic sized structures and will still be readable. This is accomplished through the cereal::SizeTag object, which will cause the archive to output the data as a JSON array (e.g. marked by [] instead of {}), which indicates that the container is variable sized and may be edited. \ingroup Archives */ class JSONOutputArchive : public OutputArchive<JSONOutputArchive>, public traits::TextArchive { enum class NodeType { StartObject, InObject, StartArray, InArray }; using WriteStream = CEREAL_RAPIDJSON_NAMESPACE::OStreamWrapper; using JSONWriter = CEREAL_RAPIDJSON_NAMESPACE::PrettyWriter<WriteStream>; public: /*! @name Common Functionality Common use cases for directly interacting with a JSONOutputArchive */ //! @{ //! A class containing various advanced options for the JSON archive class Options { public: //! Default options static Options Default(){ return Options(); } //! Default options with no indentation static Options NoIndent(){ return Options( JSONWriter::kDefaultMaxDecimalPlaces, IndentChar::space, 0 ); } //! The character to use for indenting enum class IndentChar : char { space = ' ', tab = '\t', newline = '\n', carriage_return = '\r' }; //! Specify specific options for the JSONOutputArchive /*! @param precision The precision used for floating point numbers @param indentChar The type of character to indent with @param indentLength The number of indentChar to use for indentation (0 corresponds to no indentation) */ explicit Options( int precision = JSONWriter::kDefaultMaxDecimalPlaces, IndentChar indentChar = IndentChar::space, unsigned int indentLength = 4 ) : itsPrecision( precision ), itsIndentChar( static_cast<char>(indentChar) ), itsIndentLength( indentLength ) { } private: friend class JSONOutputArchive; int itsPrecision; char itsIndentChar; unsigned int itsIndentLength; }; //! Construct, outputting to the provided stream /*! @param stream The stream to output to. @param options The JSON specific options to use. See the Options struct for the values of default parameters */ JSONOutputArchive(std::ostream & stream, Options const & options = Options::Default() ) : OutputArchive<JSONOutputArchive>(this), itsWriteStream(stream), itsWriter(itsWriteStream), itsNextName(nullptr) { itsWriter.SetMaxDecimalPlaces( options.itsPrecision ); itsWriter.SetIndent( options.itsIndentChar, options.itsIndentLength ); itsNameCounter.push(0); itsNodeStack.push(NodeType::StartObject); } //! Destructor, flushes the JSON ~JSONOutputArchive() CEREAL_NOEXCEPT { if (itsNodeStack.top() == NodeType::InObject) itsWriter.EndObject(); else if (itsNodeStack.top() == NodeType::InArray) itsWriter.EndArray(); } //! Saves some binary data, encoded as a base64 string, with an optional name /*!
This will create a new node, optionally named, and insert a value that consists of the data encoded as a base64 string */ void saveBinaryValue( const void * data, size_t size, const char * name = nullptr ) { setNextName( name ); writeName(); auto base64string = base64::encode( reinterpret_cast<const unsigned char *>( data ), size ); saveValue( base64string ); }; //! @} /*! @name Internal Functionality Functionality designed for use by those requiring control over the inner mechanisms of the JSONOutputArchive */ //! @{ //! Starts a new node in the JSON output /*! The node can optionally be given a name by calling setNextName prior to creating the node Nodes only need to be started for types that are themselves objects or arrays */ void startNode() { writeName(); itsNodeStack.push(NodeType::StartObject); itsNameCounter.push(0); } //! Designates the most recently added node as finished void finishNode() { // if we ended up serializing an empty object or array, writeName // will never have been called - so start and then immediately end // the object/array. // // We'll also end any object/arrays we happen to be in switch(itsNodeStack.top()) { case NodeType::StartArray: itsWriter.StartArray(); // fall through case NodeType::InArray: itsWriter.EndArray(); break; case NodeType::StartObject: itsWriter.StartObject(); // fall through case NodeType::InObject: itsWriter.EndObject(); break; } itsNodeStack.pop(); itsNameCounter.pop(); } //! Sets the name for the next node created with startNode void setNextName( const char * name ) { itsNextName = name; } //! Saves a bool to the current node void saveValue(bool b) { itsWriter.Bool(b); } //! Saves an int to the current node void saveValue(int i) { itsWriter.Int(i); } //! Saves a uint to the current node void saveValue(unsigned u) { itsWriter.Uint(u); } //! Saves an int64 to the current node void saveValue(int64_t i64) { itsWriter.Int64(i64); } //! Saves a uint64 to the current node void saveValue(uint64_t u64) { itsWriter.Uint64(u64); } //! Saves a double to the current node void saveValue(double d) { itsWriter.Double(d); } //! Saves a string to the current node void saveValue(std::string const & s) { itsWriter.String(s.c_str(), static_cast<CEREAL_RAPIDJSON_NAMESPACE::SizeType>( s.size() )); } //! Saves a const char * to the current node void saveValue(char const * s) { itsWriter.String(s); } //! Saves a nullptr to the current node void saveValue(std::nullptr_t) { itsWriter.Null(); } private: // Some compilers/OS have difficulty disambiguating the above for various flavors of longs, so we provide // special overloads to handle these cases. //! 32 bit signed long saving to current node template <class T, traits::EnableIf<sizeof(T) == sizeof(std::int32_t), std::is_signed<T>::value> = traits::sfinae> inline void saveLong(T l){ saveValue( static_cast<std::int32_t>( l ) ); } //! non 32 bit signed long saving to current node template <class T, traits::EnableIf<sizeof(T) != sizeof(std::int32_t), std::is_signed<T>::value> = traits::sfinae> inline void saveLong(T l){ saveValue( static_cast<std::int64_t>( l ) ); } //! 32 bit unsigned long saving to current node template <class T, traits::EnableIf<sizeof(T) == sizeof(std::int32_t), std::is_unsigned<T>::value> = traits::sfinae> inline void saveLong(T lu){ saveValue( static_cast<std::uint32_t>( lu ) ); } //! 
non 32 bit unsigned long saving to current node template <class T, traits::EnableIf<sizeof(T) != sizeof(std::int32_t), std::is_unsigned<T>::value> = traits::sfinae> inline void saveLong(T lu){ saveValue( static_cast<std::uint64_t>( lu ) ); } public: #ifdef _MSC_VER //! MSVC only long overload to current node void saveValue( unsigned long lu ){ saveLong( lu ); }; #else // _MSC_VER //! Serialize a long if it would not be caught otherwise template <class T, traits::EnableIf<std::is_same<T, long>::value, !std::is_same<T, std::int32_t>::value, !std::is_same<T, std::int64_t>::value> = traits::sfinae> inline void saveValue( T t ){ saveLong( t ); } //! Serialize an unsigned long if it would not be caught otherwise template <class T, traits::EnableIf<std::is_same<T, unsigned long>::value, !std::is_same<T, std::uint32_t>::value, !std::is_same<T, std::uint64_t>::value> = traits::sfinae> inline void saveValue( T t ){ saveLong( t ); } #endif // _MSC_VER //! Save exotic arithmetic as strings to current node /*! Handles long long (if distinct from other types), unsigned long (if distinct), and long double */ template <class T, traits::EnableIf<std::is_arithmetic<T>::value, !std::is_same<T, long>::value, !std::is_same<T, unsigned long>::value, !std::is_same<T, std::int64_t>::value, !std::is_same<T, std::uint64_t>::value, (sizeof(T) >= sizeof(long double) || sizeof(T) >= sizeof(long long))> = traits::sfinae> inline void saveValue(T const & t) { std::stringstream ss; ss.precision( std::numeric_limits<long double>::max_digits10 ); ss << t; saveValue( ss.str() ); } //! Write the name of the upcoming node and prepare object/array state /*! Since writeName is called for every value that is output, regardless of whether it has a name or not, it is the place where we will do a deferred check of our node state and decide whether we are in an array or an object. The general workflow of saving to the JSON archive is: 1. (optional) Set the name for the next node to be created, usually done by an NVP 2. Start the node 3. (if there is data to save) Write the name of the node (this function) 4. (if there is data to save) Save the data (with saveValue) 5. Finish the node */ void writeName() { NodeType const & nodeType = itsNodeStack.top(); // Start up either an object or an array, depending on state if(nodeType == NodeType::StartArray) { itsWriter.StartArray(); itsNodeStack.top() = NodeType::InArray; } else if(nodeType == NodeType::StartObject) { itsNodeStack.top() = NodeType::InObject; itsWriter.StartObject(); } // Array types do not output names if(nodeType == NodeType::InArray) return; if(itsNextName == nullptr) { std::string name = "value" + std::to_string( itsNameCounter.top()++ ) + "\0"; saveValue(name); } else { saveValue(itsNextName); itsNextName = nullptr; } } //! Designates that the current node should be output as an array, not an object void makeArray() { itsNodeStack.top() = NodeType::StartArray; } //! @} private: WriteStream itsWriteStream; //!< Rapidjson write stream JSONWriter itsWriter; //!< Rapidjson writer char const * itsNextName; //!< The next name std::stack<uint32_t> itsNameCounter; //!< Counter for creating unique names for unnamed nodes std::stack<NodeType> itsNodeStack; }; // JSONOutputArchive // ###################################################################### //! An input archive designed to load data from JSON /*! This archive uses RapidJSON to read in a JSON archive. 
As with the output JSON archive, the preferred way to use this archive is in an RAII fashion, ensuring its destruction after all data has been read. Input JSON should have been produced by the JSONOutputArchive. Data can only be added to dynamically sized containers (marked by JSON arrays) - the input archive will determine their size by looking at the number of child nodes. Only JSON originating from a JSONOutputArchive is officially supported, but data from other sources may work if properly formatted. The JSONInputArchive does not require that nodes are loaded in the same order they were saved by JSONOutputArchive. Using name value pairs (NVPs), it is possible to load in an out of order fashion or otherwise skip/select specific nodes to load. The default behavior of the input archive is to read sequentially starting with the first node and exploring its children. When a given NVP does not match the read in name for a node, the archive will search for that node at the current level and load it if it exists. After loading an out of order node, the archive will then proceed back to loading sequentially from its new position. Consider this simple example where loading of some data is skipped: @code{cpp} // imagine the input file has someData(1-9) saved in order at the top level node ar( someData1, someData2, someData3 ); // JSON loads in the order it sees in the file ar( cereal::make_nvp( "hello", someData6 ) ); // NVP given does not // match expected NVP name, so we search // for the given NVP and load that value ar( someData7, someData8, someData9 ); // with no NVP given, loading resumes at its // current location, proceeding sequentially @endcode \ingroup Archives */ class JSONInputArchive : public InputArchive<JSONInputArchive>, public traits::TextArchive { private: using ReadStream = CEREAL_RAPIDJSON_NAMESPACE::IStreamWrapper; typedef CEREAL_RAPIDJSON_NAMESPACE::GenericValue<CEREAL_RAPIDJSON_NAMESPACE::UTF8<>> JSONValue; typedef JSONValue::ConstMemberIterator MemberIterator; typedef JSONValue::ConstValueIterator ValueIterator; typedef CEREAL_RAPIDJSON_NAMESPACE::Document::GenericValue GenericValue; public: /*! @name Common Functionality Common use cases for directly interacting with a JSONInputArchive */ //! @{ //! Construct, reading from the provided stream /*! @param stream The stream to read from */ JSONInputArchive(std::istream & stream) : InputArchive<JSONInputArchive>(this), itsNextName( nullptr ), itsReadStream(stream) { itsDocument.ParseStream<>(itsReadStream); if (itsDocument.IsArray()) itsIteratorStack.emplace_back(itsDocument.Begin(), itsDocument.End()); else itsIteratorStack.emplace_back(itsDocument.MemberBegin(), itsDocument.MemberEnd()); } ~JSONInputArchive() CEREAL_NOEXCEPT = default; //! Loads some binary data, encoded as a base64 string /*! This will automatically start and finish a node to load the data, and can be called directly by users. Note that this follows the same ordering rules specified in the class description in regards to loading in/out of order */ void loadBinaryValue( void * data, size_t size, const char * name = nullptr ) { itsNextName = name; std::string encoded; loadValue( encoded ); auto decoded = base64::decode( encoded ); if( size != decoded.size() ) throw Exception("Decoded binary data size does not match specified size"); std::memcpy( data, decoded.data(), decoded.size() ); itsNextName = nullptr; }; private: //! @} /*!
@name Internal Functionality Functionality designed for use by those requiring control over the inner mechanisms of the JSONInputArchive */ //! @{ //! An internal iterator that handles both array and object types /*! This class is a variant and holds both types of iterators that rapidJSON supports - one for arrays and one for objects. */ class Iterator { public: Iterator() : itsIndex( 0 ), itsType(Null_) {} Iterator(MemberIterator begin, MemberIterator end) : itsMemberItBegin(begin), itsMemberItEnd(end), itsIndex(0), itsType(Member) { if( std::distance( begin, end ) == 0 ) itsType = Null_; } Iterator(ValueIterator begin, ValueIterator end) : itsValueItBegin(begin), itsIndex(0), itsType(Value) { if( std::distance( begin, end ) == 0 ) itsType = Null_; } //! Advance to the next node Iterator & operator++() { ++itsIndex; return *this; } //! Get the value of the current node GenericValue const & value() { switch(itsType) { case Value : return itsValueItBegin[itsIndex]; case Member: return itsMemberItBegin[itsIndex].value; default: throw cereal::Exception("JSONInputArchive internal error: null or empty iterator to object or array!"); } } //! Get the name of the current node, or nullptr if it has no name const char * name() const { if( itsType == Member && (itsMemberItBegin + itsIndex) != itsMemberItEnd ) return itsMemberItBegin[itsIndex].name.GetString(); else return nullptr; } //! Adjust our position such that we are at the node with the given name /*! @throws Exception if no such named node exists */ inline void search( const char * searchName ) { const auto len = std::strlen( searchName ); size_t index = 0; for( auto it = itsMemberItBegin; it != itsMemberItEnd; ++it, ++index ) { const auto currentName = it->name.GetString(); if( ( std::strncmp( searchName, currentName, len ) == 0 ) && ( std::strlen( currentName ) == len ) ) { itsIndex = index; return; } } throw Exception("JSON Parsing failed - provided NVP (" + std::string(searchName) + ") not found"); } private: MemberIterator itsMemberItBegin, itsMemberItEnd; //!< The member iterator (object) ValueIterator itsValueItBegin; //!< The value iterator (array) size_t itsIndex; //!< The current index of this iterator enum Type {Value, Member, Null_} itsType; //!< Whether this holds values (array) or members (objects) or nothing }; //! Searches for the expectedName node if it doesn't match the actualName /*! This needs to be called before every load or node start occurs. This function will check to see if an NVP has been provided (with setNextName) and if so, see if that name matches the actual next name given. If the names do not match, it will search in the current level of the JSON for that name. If the name is not found, an exception will be thrown. Resets the NVP name after being called. @throws Exception if an expectedName is given and not found */ inline void search() { // The name provided by an NVP via setNextName() if( itsNextName ) { // The actual name of the current node auto const actualName = itsIteratorStack.back().name(); // Do a search if we don't see a name coming up, or if the names don't match if( !actualName || std::strcmp( itsNextName, actualName ) != 0 ) itsIteratorStack.back().search( itsNextName ); } itsNextName = nullptr; } public: //! Starts a new node, going into its proper iterator /*! This places an iterator for the next node to be parsed onto the iterator stack. If the next node is an array, this will be a value iterator, otherwise it will be a member iterator.
By default our strategy is to start with the document root node and then recursively iterate through all children in the order they show up in the document. We don't need to know NVPs to do this; we'll just blindly load in the order things appear. If we were given an NVP, we will search for it if it does not match the name of the next node that would normally be loaded. This functionality is provided by search(). */ void startNode() { search(); if(itsIteratorStack.back().value().IsArray()) itsIteratorStack.emplace_back(itsIteratorStack.back().value().Begin(), itsIteratorStack.back().value().End()); else itsIteratorStack.emplace_back(itsIteratorStack.back().value().MemberBegin(), itsIteratorStack.back().value().MemberEnd()); } //! Finishes the most recently started node void finishNode() { itsIteratorStack.pop_back(); ++itsIteratorStack.back(); } //! Retrieves the current node name /*! @return nullptr if no name exists */ const char * getNodeName() const { return itsIteratorStack.back().name(); } //! Sets the name for the next node created with startNode void setNextName( const char * name ) { itsNextName = name; } //! Loads a value from the current node - small signed overload template <class T, traits::EnableIf<std::is_signed<T>::value, sizeof(T) < sizeof(int64_t)> = traits::sfinae> inline void loadValue(T & val) { search(); val = static_cast<T>( itsIteratorStack.back().value().GetInt() ); ++itsIteratorStack.back(); } //! Loads a value from the current node - small unsigned overload template <class T, traits::EnableIf<std::is_unsigned<T>::value, sizeof(T) < sizeof(uint64_t), !std::is_same<bool, T>::value> = traits::sfinae> inline void loadValue(T & val) { search(); val = static_cast<T>( itsIteratorStack.back().value().GetUint() ); ++itsIteratorStack.back(); } //! Loads a value from the current node - bool overload void loadValue(bool & val) { search(); val = itsIteratorStack.back().value().GetBool(); ++itsIteratorStack.back(); } //! Loads a value from the current node - int64 overload void loadValue(int64_t & val) { search(); val = itsIteratorStack.back().value().GetInt64(); ++itsIteratorStack.back(); } //! Loads a value from the current node - uint64 overload void loadValue(uint64_t & val) { search(); val = itsIteratorStack.back().value().GetUint64(); ++itsIteratorStack.back(); } //! Loads a value from the current node - float overload void loadValue(float & val) { search(); val = static_cast<float>(itsIteratorStack.back().value().GetDouble()); ++itsIteratorStack.back(); } //! Loads a value from the current node - double overload void loadValue(double & val) { search(); val = itsIteratorStack.back().value().GetDouble(); ++itsIteratorStack.back(); } //! Loads a value from the current node - string overload void loadValue(std::string & val) { search(); val = itsIteratorStack.back().value().GetString(); ++itsIteratorStack.back(); } //! Loads a nullptr from the current node void loadValue(std::nullptr_t&) { search(); CEREAL_RAPIDJSON_ASSERT(itsIteratorStack.back().value().IsNull()); ++itsIteratorStack.back(); } // Special cases to handle various flavors of long, which tend to conflict with // the int32_t or int64_t on various compiler/OS combinations. MSVC doesn't need any of this. #ifndef _MSC_VER private: //! 32 bit signed long loading from current node template <class T> inline typename std::enable_if<sizeof(T) == sizeof(std::int32_t) && std::is_signed<T>::value, void>::type loadLong(T & l){ loadValue( reinterpret_cast<std::int32_t&>( l ) ); } //!
non 32 bit signed long loading from current node template <class T> inline typename std::enable_if<sizeof(T) == sizeof(std::int64_t) && std::is_signed<T>::value, void>::type loadLong(T & l){ loadValue( reinterpret_cast<std::int64_t&>( l ) ); } //! 32 bit unsigned long loading from current node template <class T> inline typename std::enable_if<sizeof(T) == sizeof(std::uint32_t) && !std::is_signed<T>::value, void>::type loadLong(T & lu){ loadValue( reinterpret_cast<std::uint32_t&>( lu ) ); } //! non 32 bit unsigned long loading from current node template <class T> inline typename std::enable_if<sizeof(T) == sizeof(std::uint64_t) && !std::is_signed<T>::value, void>::type loadLong(T & lu){ loadValue( reinterpret_cast<std::uint64_t&>( lu ) ); } public: //! Serialize a long if it would not be caught otherwise template <class T> inline typename std::enable_if<std::is_same<T, long>::value && sizeof(T) >= sizeof(std::int64_t) && !std::is_same<T, std::int64_t>::value, void>::type loadValue( T & t ){ loadLong(t); } //! Serialize an unsigned long if it would not be caught otherwise template <class T> inline typename std::enable_if<std::is_same<T, unsigned long>::value && sizeof(T) >= sizeof(std::uint64_t) && !std::is_same<T, std::uint64_t>::value, void>::type loadValue( T & t ){ loadLong(t); } #endif // _MSC_VER private: //! Convert a string to a long long void stringToNumber( std::string const & str, long long & val ) { val = std::stoll( str ); } //! Convert a string to an unsigned long long void stringToNumber( std::string const & str, unsigned long long & val ) { val = std::stoull( str ); } //! Convert a string to a long double void stringToNumber( std::string const & str, long double & val ) { val = std::stold( str ); } public: //! Loads a value from the current node - long double and long long overloads template <class T, traits::EnableIf<std::is_arithmetic<T>::value, !std::is_same<T, long>::value, !std::is_same<T, unsigned long>::value, !std::is_same<T, std::int64_t>::value, !std::is_same<T, std::uint64_t>::value, (sizeof(T) >= sizeof(long double) || sizeof(T) >= sizeof(long long))> = traits::sfinae> inline void loadValue(T & val) { std::string encoded; loadValue( encoded ); stringToNumber( encoded, val ); } //! Loads the size for a SizeTag void loadSize(size_type & size) { if (itsIteratorStack.size() == 1) size = itsDocument.Size(); else size = (itsIteratorStack.rbegin() + 1)->value().Size(); } //! @} private: const char * itsNextName; //!< Next name set by NVP ReadStream itsReadStream; //!< Rapidjson read stream std::vector<Iterator> itsIteratorStack; //!< 'Stack' of rapidJSON iterators CEREAL_RAPIDJSON_NAMESPACE::Document itsDocument; //!< Rapidjson document }; // ###################################################################### // JSONArchive prologue and epilogue functions // ###################################################################### // ###################################################################### //! Prologue for NVPs for JSON archives /*! NVPs do not start or finish nodes - they just set up the names */ template <class T> inline void prologue( JSONOutputArchive &, NameValuePair<T> const & ) { } //! Prologue for NVPs for JSON archives template <class T> inline void prologue( JSONInputArchive &, NameValuePair<T> const & ) { } // ###################################################################### //! Epilogue for NVPs for JSON archives /*!
NVPs do not start or finish nodes - they just set up the names */ template <class T> inline void epilogue( JSONOutputArchive &, NameValuePair<T> const & ) { } //! Epilogue for NVPs for JSON archives /*! NVPs do not start or finish nodes - they just set up the names */ template <class T> inline void epilogue( JSONInputArchive &, NameValuePair<T> const & ) { } // ###################################################################### //! Prologue for deferred data for JSON archives /*! Do nothing for the defer wrapper */ template <class T> inline void prologue( JSONOutputArchive &, DeferredData<T> const & ) { } //! Prologue for deferred data for JSON archives template <class T> inline void prologue( JSONInputArchive &, DeferredData<T> const & ) { } // ###################################################################### //! Epilogue for deferred data for JSON archives /*! Do nothing for the defer wrapper */ template <class T> inline void epilogue( JSONOutputArchive &, DeferredData<T> const & ) { } //! Epilogue for deferred data for JSON archives /*! Do nothing for the defer wrapper */ template <class T> inline void epilogue( JSONInputArchive &, DeferredData<T> const & ) { } // ###################################################################### //! Prologue for SizeTags for JSON archives /*! SizeTags are strictly ignored for JSON; they just indicate that the current node should be made into an array */ template <class T> inline void prologue( JSONOutputArchive & ar, SizeTag<T> const & ) { ar.makeArray(); } //! Prologue for SizeTags for JSON archives template <class T> inline void prologue( JSONInputArchive &, SizeTag<T> const & ) { } // ###################################################################### //! Epilogue for SizeTags for JSON archives /*! SizeTags are strictly ignored for JSON */ template <class T> inline void epilogue( JSONOutputArchive &, SizeTag<T> const & ) { } //! Epilogue for SizeTags for JSON archives template <class T> inline void epilogue( JSONInputArchive &, SizeTag<T> const & ) { } // ###################################################################### //! Prologue for all other types for JSON archives (except minimal types) /*! Starts a new node, named either automatically or by some NVP, that may be given data by the type about to be archived. Minimal types do not start or finish nodes */ template <class T, traits::EnableIf<!std::is_arithmetic<T>::value, !traits::has_minimal_base_class_serialization<T, traits::has_minimal_output_serialization, JSONOutputArchive>::value, !traits::has_minimal_output_serialization<T, JSONOutputArchive>::value> = traits::sfinae> inline void prologue( JSONOutputArchive & ar, T const & ) { ar.startNode(); } //! Prologue for all other types for JSON archives template <class T, traits::EnableIf<!std::is_arithmetic<T>::value, !traits::has_minimal_base_class_serialization<T, traits::has_minimal_input_serialization, JSONInputArchive>::value, !traits::has_minimal_input_serialization<T, JSONInputArchive>::value> = traits::sfinae> inline void prologue( JSONInputArchive & ar, T const & ) { ar.startNode(); } // ###################################################################### //! Epilogue for all other types for JSON archives (except minimal types) /*!
Finishes the node created in the prologue. Minimal types do not start or finish nodes */ template <class T, traits::EnableIf<!std::is_arithmetic<T>::value, !traits::has_minimal_base_class_serialization<T, traits::has_minimal_output_serialization, JSONOutputArchive>::value, !traits::has_minimal_output_serialization<T, JSONOutputArchive>::value> = traits::sfinae> inline void epilogue( JSONOutputArchive & ar, T const & ) { ar.finishNode(); } //! Epilogue for all other types for JSON archives template <class T, traits::EnableIf<!std::is_arithmetic<T>::value, !traits::has_minimal_base_class_serialization<T, traits::has_minimal_input_serialization, JSONInputArchive>::value, !traits::has_minimal_input_serialization<T, JSONInputArchive>::value> = traits::sfinae> inline void epilogue( JSONInputArchive & ar, T const & ) { ar.finishNode(); } // ###################################################################### //! Prologue for nullptr for JSON archives inline void prologue( JSONOutputArchive & ar, std::nullptr_t const & ) { ar.writeName(); } //! Prologue for nullptr for JSON archives inline void prologue( JSONInputArchive &, std::nullptr_t const & ) { } // ###################################################################### //! Epilogue for nullptr for JSON archives inline void epilogue( JSONOutputArchive &, std::nullptr_t const & ) { } //! Epilogue for nullptr for JSON archives inline void epilogue( JSONInputArchive &, std::nullptr_t const & ) { } // ###################################################################### //! Prologue for arithmetic types for JSON archives template <class T, traits::EnableIf<std::is_arithmetic<T>::value> = traits::sfinae> inline void prologue( JSONOutputArchive & ar, T const & ) { ar.writeName(); } //! Prologue for arithmetic types for JSON archives template <class T, traits::EnableIf<std::is_arithmetic<T>::value> = traits::sfinae> inline void prologue( JSONInputArchive &, T const & ) { } // ###################################################################### //! Epilogue for arithmetic types for JSON archives template <class T, traits::EnableIf<std::is_arithmetic<T>::value> = traits::sfinae> inline void epilogue( JSONOutputArchive &, T const & ) { } //! Epilogue for arithmetic types for JSON archives template <class T, traits::EnableIf<std::is_arithmetic<T>::value> = traits::sfinae> inline void epilogue( JSONInputArchive &, T const & ) { } // ###################################################################### //! Prologue for strings for JSON archives template<class CharT, class Traits, class Alloc> inline void prologue(JSONOutputArchive & ar, std::basic_string<CharT, Traits, Alloc> const &) { ar.writeName(); } //! Prologue for strings for JSON archives template<class CharT, class Traits, class Alloc> inline void prologue(JSONInputArchive &, std::basic_string<CharT, Traits, Alloc> const &) { } // ###################################################################### //! Epilogue for strings for JSON archives template<class CharT, class Traits, class Alloc> inline void epilogue(JSONOutputArchive &, std::basic_string<CharT, Traits, Alloc> const &) { } //!
Epilogue for strings for JSON archives template<class CharT, class Traits, class Alloc> inline void epilogue(JSONInputArchive &, std::basic_string<CharT, Traits, Alloc> const &) { } // ###################################################################### // Common JSONArchive serialization functions // ###################################################################### //! Serializing NVP types to JSON template <class T> inline void CEREAL_SAVE_FUNCTION_NAME( JSONOutputArchive & ar, NameValuePair<T> const & t ) { ar.setNextName( t.name ); ar( t.value ); } template <class T> inline void CEREAL_LOAD_FUNCTION_NAME( JSONInputArchive & ar, NameValuePair<T> & t ) { ar.setNextName( t.name ); ar( t.value ); } //! Saving nullptr to JSON inline void CEREAL_SAVE_FUNCTION_NAME(JSONOutputArchive & ar, std::nullptr_t const & t) { ar.saveValue( t ); } //! Loading nullptr from JSON inline void CEREAL_LOAD_FUNCTION_NAME(JSONInputArchive & ar, std::nullptr_t & t) { ar.loadValue( t ); } //! Saving arithmetic to JSON template <class T, traits::EnableIf<std::is_arithmetic<T>::value> = traits::sfinae> inline void CEREAL_SAVE_FUNCTION_NAME(JSONOutputArchive & ar, T const & t) { ar.saveValue( t ); } //! Loading arithmetic from JSON template <class T, traits::EnableIf<std::is_arithmetic<T>::value> = traits::sfinae> inline void CEREAL_LOAD_FUNCTION_NAME(JSONInputArchive & ar, T & t) { ar.loadValue( t ); } //! Saving strings to JSON template<class CharT, class Traits, class Alloc> inline void CEREAL_SAVE_FUNCTION_NAME(JSONOutputArchive & ar, std::basic_string<CharT, Traits, Alloc> const & str) { ar.saveValue( str ); } //! Loading strings from JSON template<class CharT, class Traits, class Alloc> inline void CEREAL_LOAD_FUNCTION_NAME(JSONInputArchive & ar, std::basic_string<CharT, Traits, Alloc> & str) { ar.loadValue( str ); } // ###################################################################### //! Saving SizeTags to JSON template <class T> inline void CEREAL_SAVE_FUNCTION_NAME( JSONOutputArchive &, SizeTag<T> const & ) { // nothing to do here, we don't explicitly save the size } //! Loading SizeTags from JSON template <class T> inline void CEREAL_LOAD_FUNCTION_NAME( JSONInputArchive & ar, SizeTag<T> & st ) { ar.loadSize( st.size ); } } // namespace cereal // register archives for polymorphic support CEREAL_REGISTER_ARCHIVE(cereal::JSONInputArchive) CEREAL_REGISTER_ARCHIVE(cereal::JSONOutputArchive) // tie input and output archives together CEREAL_SETUP_ARCHIVE_TRAITS(cereal::JSONInputArchive, cereal::JSONOutputArchive) #endif // CEREAL_ARCHIVES_JSON_HPP_
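A brief usage sketch of the two archives defined above (illustrative only, not part of the header; the Config struct and its field names are invented for the example):

#include <cereal/archives/json.hpp>
#include <sstream>

struct Config {
  int width = 640;
  double scale = 1.5;
  template <class Archive>
  void serialize(Archive & ar) { ar(CEREAL_NVP(width), CEREAL_NVP(scale)); }
};

int main() {
  std::stringstream ss;
  {
    // Scoped so the output archive flushes on destruction (RAII).
    cereal::JSONOutputArchive oar(ss);
    Config c;
    oar(cereal::make_nvp("config", c));
  }
  Config loaded;
  {
    cereal::JSONInputArchive iar(ss);
    iar(cereal::make_nvp("config", loaded));
  }
  return 0;
}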
0
coqui_public_repos/snakepit
coqui_public_repos/snakepit/bin/prepare-images.sh
#!/usr/bin/env bash set -e roles=(daemon worker) print_header () { printf "\n>>>>>>>> $1 <<<<<<<<\n\n" } print_header "Configuring image source" bin/prepare-lxd.sh for role in "${roles[@]}"; do print_header "Creating ${role} image" lxc init ubuntu-minimal:18.04/amd64 snakepit-${role} lxc start snakepit-${role} exe="lxc exec snakepit-${role} -- " sleep 2 $exe systemctl isolate multi-user.target print_header "Starting ${role} setup" tar cf - -C scripts/${role} . | lxc exec snakepit-${role} -- tar xvf - --no-same-owner -C /root $exe bash /root/setup.sh done
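A hypothetical follow-up step (an assumption, not part of this script) would be to freeze the prepared containers into reusable local images via lxc publish:

for role in "${roles[@]}"; do
    print_header "Publishing ${role} image"
    # Containers must be stopped (or published with --force) before publishing.
    lxc stop snakepit-${role}
    lxc publish snakepit-${role} --alias snakepit-${role}
done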
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/pdt/expand.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Expands a PDT to an FST. #ifndef FST_EXTENSIONS_PDT_EXPAND_H_ #define FST_EXTENSIONS_PDT_EXPAND_H_ #include <forward_list> #include <vector> #include <fst/log.h> #include <fst/extensions/pdt/paren.h> #include <fst/extensions/pdt/pdt.h> #include <fst/extensions/pdt/reverse.h> #include <fst/extensions/pdt/shortest-path.h> #include <fst/cache.h> #include <fst/mutable-fst.h> #include <fst/queue.h> #include <fst/state-table.h> #include <fst/test-properties.h> namespace fst { template <class Arc> struct PdtExpandFstOptions : public CacheOptions { bool keep_parentheses; PdtStack<typename Arc::StateId, typename Arc::Label> *stack; PdtStateTable<typename Arc::StateId, typename Arc::StateId> *state_table; explicit PdtExpandFstOptions( const CacheOptions &opts = CacheOptions(), bool keep_parentheses = false, PdtStack<typename Arc::StateId, typename Arc::Label> *stack = nullptr, PdtStateTable<typename Arc::StateId, typename Arc::StateId> *state_table = nullptr) : CacheOptions(opts), keep_parentheses(keep_parentheses), stack(stack), state_table(state_table) {} }; namespace internal { // Implementation class for PdtExpandFst. template <class Arc> class PdtExpandFstImpl : public CacheImpl<Arc> { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using StackId = StateId; using StateTuple = PdtStateTuple<StateId, StackId>; using FstImpl<Arc>::SetType; using FstImpl<Arc>::SetProperties; using FstImpl<Arc>::Properties; using FstImpl<Arc>::SetInputSymbols; using FstImpl<Arc>::SetOutputSymbols; using CacheBaseImpl<CacheState<Arc>>::PushArc; using CacheBaseImpl<CacheState<Arc>>::HasArcs; using CacheBaseImpl<CacheState<Arc>>::HasFinal; using CacheBaseImpl<CacheState<Arc>>::HasStart; using CacheBaseImpl<CacheState<Arc>>::SetArcs; using CacheBaseImpl<CacheState<Arc>>::SetFinal; using CacheBaseImpl<CacheState<Arc>>::SetStart; PdtExpandFstImpl(const Fst<Arc> &fst, const std::vector<std::pair<Label, Label>> &parens, const PdtExpandFstOptions<Arc> &opts) : CacheImpl<Arc>(opts), fst_(fst.Copy()), stack_(opts.stack ? opts.stack : new PdtStack<StateId, Label>(parens)), state_table_(opts.state_table ? 
opts.state_table : new PdtStateTable<StateId, StackId>()), own_stack_(opts.stack == 0), own_state_table_(opts.state_table == 0), keep_parentheses_(opts.keep_parentheses) { SetType("expand"); const auto props = fst.Properties(kFstProperties, false); SetProperties(PdtExpandProperties(props), kCopyProperties); SetInputSymbols(fst.InputSymbols()); SetOutputSymbols(fst.OutputSymbols()); } PdtExpandFstImpl(const PdtExpandFstImpl &impl) : CacheImpl<Arc>(impl), fst_(impl.fst_->Copy(true)), stack_(new PdtStack<StateId, Label>(*impl.stack_)), state_table_(new PdtStateTable<StateId, StackId>()), own_stack_(true), own_state_table_(true), keep_parentheses_(impl.keep_parentheses_) { SetType("expand"); SetProperties(impl.Properties(), kCopyProperties); SetInputSymbols(impl.InputSymbols()); SetOutputSymbols(impl.OutputSymbols()); } ~PdtExpandFstImpl() override { if (own_stack_) delete stack_; if (own_state_table_) delete state_table_; } StateId Start() { if (!HasStart()) { const auto s = fst_->Start(); if (s == kNoStateId) return kNoStateId; StateTuple tuple(s, 0); const auto start = state_table_->FindState(tuple); SetStart(start); } return CacheImpl<Arc>::Start(); } Weight Final(StateId s) { if (!HasFinal(s)) { const auto &tuple = state_table_->Tuple(s); const auto weight = fst_->Final(tuple.state_id); if (weight != Weight::Zero() && tuple.stack_id == 0) SetFinal(s, weight); else SetFinal(s, Weight::Zero()); } return CacheImpl<Arc>::Final(s); } size_t NumArcs(StateId s) { if (!HasArcs(s)) ExpandState(s); return CacheImpl<Arc>::NumArcs(s); } size_t NumInputEpsilons(StateId s) { if (!HasArcs(s)) ExpandState(s); return CacheImpl<Arc>::NumInputEpsilons(s); } size_t NumOutputEpsilons(StateId s) { if (!HasArcs(s)) ExpandState(s); return CacheImpl<Arc>::NumOutputEpsilons(s); } void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) { if (!HasArcs(s)) ExpandState(s); CacheImpl<Arc>::InitArcIterator(s, data); } // Computes the outgoing transitions from a state, creating new destination // states as needed. void ExpandState(StateId s) { StateTuple tuple = state_table_->Tuple(s); for (ArcIterator<Fst<Arc>> aiter(*fst_, tuple.state_id); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); const auto stack_id = stack_->Find(tuple.stack_id, arc.ilabel); if (stack_id == -1) { // Non-matching close parenthesis. continue; } else if ((stack_id != tuple.stack_id) && !keep_parentheses_) { // Stack push/pop. arc.ilabel = 0; arc.olabel = 0; } StateTuple ntuple(arc.nextstate, stack_id); arc.nextstate = state_table_->FindState(ntuple); PushArc(s, arc); } SetArcs(s); } const PdtStack<StackId, Label> &GetStack() const { return *stack_; } const PdtStateTable<StateId, StackId> &GetStateTable() const { return *state_table_; } private: // Properties for an expanded PDT. inline uint64_t PdtExpandProperties(uint64_t inprops) { return inprops & (kAcceptor | kAcyclic | kInitialAcyclic | kUnweighted); } std::unique_ptr<const Fst<Arc>> fst_; PdtStack<StackId, Label> *stack_; PdtStateTable<StateId, StackId> *state_table_; bool own_stack_; bool own_state_table_; bool keep_parentheses_; }; } // namespace internal // Expands a pushdown transducer (PDT) encoded as an FST into an FST. This // version is a delayed FST. In the PDT, some transitions are labeled with open // or close parentheses. To be interpreted as a PDT, the parens must balance on // a path. The open-close parenthesis label pairs are passed using the parens // argument. The expansion enforces the parenthesis constraints. The PDT must be // expandable as an FST. 
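// Illustrative usage sketch (an assumption, not part of the original header;
// `pdt` stands for any StdFst in which labels 1 and 2 encode an open/close
// parenthesis pair):
//
//   std::vector<std::pair<fst::StdArc::Label, fst::StdArc::Label>> parens =
//       {{1, 2}};
//   fst::PdtExpandFst<fst::StdArc> efst(pdt, parens);  // expanded on demand
//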
// // This class attaches interface to implementation and handles reference // counting, delegating most methods to ImplToFst. template <class A> class PdtExpandFst : public ImplToFst<internal::PdtExpandFstImpl<A>> { public: using Arc = A; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using StackId = StateId; using Store = DefaultCacheStore<Arc>; using State = typename Store::State; using Impl = internal::PdtExpandFstImpl<Arc>; friend class ArcIterator<PdtExpandFst<Arc>>; friend class StateIterator<PdtExpandFst<Arc>>; PdtExpandFst(const Fst<Arc> &fst, const std::vector<std::pair<Label, Label>> &parens) : ImplToFst<Impl>( std::make_shared<Impl>(fst, parens, PdtExpandFstOptions<A>())) {} PdtExpandFst(const Fst<Arc> &fst, const std::vector<std::pair<Label, Label>> &parens, const PdtExpandFstOptions<Arc> &opts) : ImplToFst<Impl>(std::make_shared<Impl>(fst, parens, opts)) {} // See Fst<>::Copy() for doc. PdtExpandFst(const PdtExpandFst<Arc> &fst, bool safe = false) : ImplToFst<Impl>(fst, safe) {} // Gets a copy of this ExpandFst. See Fst<>::Copy() for further doc. PdtExpandFst<Arc> *Copy(bool safe = false) const override { return new PdtExpandFst<Arc>(*this, safe); } inline void InitStateIterator(StateIteratorData<Arc> *data) const override; void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override { GetMutableImpl()->InitArcIterator(s, data); } const PdtStack<StackId, Label> &GetStack() const { return GetImpl()->GetStack(); } const PdtStateTable<StateId, StackId> &GetStateTable() const { return GetImpl()->GetStateTable(); } private: using ImplToFst<Impl>::GetImpl; using ImplToFst<Impl>::GetMutableImpl; void operator=(const PdtExpandFst &) = delete; }; // Specialization for PdtExpandFst. template <class Arc> class StateIterator<PdtExpandFst<Arc>> : public CacheStateIterator<PdtExpandFst<Arc>> { public: explicit StateIterator(const PdtExpandFst<Arc> &fst) : CacheStateIterator<PdtExpandFst<Arc>>(fst, fst.GetMutableImpl()) {} }; // Specialization for PdtExpandFst. template <class Arc> class ArcIterator<PdtExpandFst<Arc>> : public CacheArcIterator<PdtExpandFst<Arc>> { public: using StateId = typename Arc::StateId; ArcIterator(const PdtExpandFst<Arc> &fst, StateId s) : CacheArcIterator<PdtExpandFst<Arc>>(fst.GetMutableImpl(), s) { if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->ExpandState(s); } }; template <class Arc> inline void PdtExpandFst<Arc>::InitStateIterator( StateIteratorData<Arc> *data) const { data->base = new StateIterator<PdtExpandFst<Arc>>(*this); } // PrunedExpand prunes the delayed expansion of a pushdown transducer (PDT) // encoded as an FST into an FST. In the PDT, some transitions are labeled with // open or close parentheses. To be interpreted as a PDT, the parens must // balance on a path. The open-close parenthesis label pairs are passed // using the parens argument. The expansion enforces the parenthesis // constraints. // // The algorithm works by visiting the delayed ExpandFst using a shortest-stack // first queue discipline and relies on the shortest-distance information // computed using a reverse shortest-path call to perform the pruning. // // The algorithm maintains the same state ordering between the ExpandFst being // visited (efst_) and the result of pruning written into the MutableFst (ofst_) // to improve readability. 
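// Illustrative usage sketch (an assumption, not part of the original header;
// `pdt` and `parens` as in the expansion example above):
//
//   fst::PdtPrunedExpand<fst::StdArc> pruned(pdt, parens);
//   if (!pruned.Error()) {
//     fst::StdVectorFst ofst;
//     pruned.Expand(&ofst, fst::StdArc::Weight(5.0));  // weight threshold
//   }
//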
template <class Arc> class PdtPrunedExpand { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using StackId = StateId; using Stack = PdtStack<StackId, Label>; using StateTable = PdtStateTable<StateId, StackId>; using SetIterator = typename internal::PdtBalanceData<Arc>::SetIterator; // Constructor taking as input a PDT specified by an input FST and a vector // of parentheses. The keep_parentheses argument specifies whether parentheses // are replaced by epsilons or not during the expansion. The cache options are // passed to the underlying ExpandFst. PdtPrunedExpand(const Fst<Arc> &ifst, const std::vector<std::pair<Label, Label>> &parens, bool keep_parentheses = false, const CacheOptions &opts = CacheOptions()) : ifst_(ifst.Copy()), keep_parentheses_(keep_parentheses), stack_(parens), efst_(ifst, parens, PdtExpandFstOptions<Arc>(opts, true, &stack_, &state_table_)), queue_(state_table_, stack_, stack_length_, distance_, fdistance_), error_(false) { Reverse(*ifst_, parens, &rfst_); VectorFst<Arc> path; reverse_shortest_path_.reset(new PdtShortestPath<Arc, FifoQueue<StateId>>( rfst_, parens, PdtShortestPathOptions<Arc, FifoQueue<StateId>>(true, false))); reverse_shortest_path_->ShortestPath(&path); error_ = (path.Properties(kError, true) == kError); balance_data_.reset(reverse_shortest_path_->GetBalanceData()->Reverse( rfst_.NumStates(), 10, -1)); InitCloseParenMultimap(parens); } bool Error() const { return error_; } // Expands and prunes the input PDT according to the provided weight // threshold, writing the result into an output mutable FST. void Expand(MutableFst<Arc> *ofst, const Weight &threshold); private: static constexpr uint8_t kEnqueued = 0x01; static constexpr uint8_t kExpanded = 0x02; static constexpr uint8_t kSourceState = 0x04; // Comparison functor used by the queue: // // 1. States corresponding to shortest stack first, and // 2. for stacks of matching length, reverse lexicographic order is used, and // 3. for states with the same stack, shortest-first order is used. class StackCompare { public: StackCompare(const StateTable &state_table, const Stack &stack, const std::vector<StackId> &stack_length, const std::vector<Weight> &distance, const std::vector<Weight> &fdistance) : state_table_(state_table), stack_(stack), stack_length_(stack_length), distance_(distance), fdistance_(fdistance) {} bool operator()(StateId s1, StateId s2) const { auto si1 = state_table_.Tuple(s1).stack_id; auto si2 = state_table_.Tuple(s2).stack_id; if (stack_length_[si1] < stack_length_[si2]) return true; if (stack_length_[si1] > stack_length_[si2]) return false; // If stack IDs are equal, use A*. if (si1 == si2) { return less_(Distance(s1), Distance(s2)); } // If lengths are equal, uses reverse lexicographic order. for (; si1 != si2; si1 = stack_.Pop(si1), si2 = stack_.Pop(si2)) { if (stack_.Top(si1) < stack_.Top(si2)) return true; if (stack_.Top(si1) > stack_.Top(si2)) return false; } return false; } private: Weight Distance(StateId s) const { return (s < distance_.size()) && (s < fdistance_.size()) ?
Times(distance_[s], fdistance_[s]) : Weight::Zero(); } const StateTable &state_table_; const Stack &stack_; const std::vector<StackId> &stack_length_; const std::vector<Weight> &distance_; const std::vector<Weight> &fdistance_; const NaturalLess<Weight> less_; }; class ShortestStackFirstQueue : public ShortestFirstQueue<StateId, StackCompare> { public: ShortestStackFirstQueue(const PdtStateTable<StateId, StackId> &state_table, const Stack &stack, const std::vector<StackId> &stack_length, const std::vector<Weight> &distance, const std::vector<Weight> &fdistance) : ShortestFirstQueue<StateId, StackCompare>(StackCompare( state_table, stack, stack_length, distance, fdistance)) {} }; void InitCloseParenMultimap( const std::vector<std::pair<Label, Label>> &parens); Weight DistanceToDest(StateId source, StateId dest) const; uint8_t Flags(StateId s) const; void SetFlags(StateId s, uint8_t flags, uint8_t mask); Weight Distance(StateId s) const; void SetDistance(StateId s, Weight weight); Weight FinalDistance(StateId s) const; void SetFinalDistance(StateId s, Weight weight); StateId SourceState(StateId s) const; void SetSourceState(StateId s, StateId p); void AddStateAndEnqueue(StateId s); void Relax(StateId s, const Arc &arc, Weight weight); bool PruneArc(StateId s, const Arc &arc); void ProcStart(); void ProcFinal(StateId s); bool ProcNonParen(StateId s, const Arc &arc, bool add_arc); bool ProcOpenParen(StateId s, const Arc &arc, StackId si, StackId nsi); bool ProcCloseParen(StateId s, const Arc &arc); void ProcDestStates(StateId s, StackId si); // Input PDT. std::unique_ptr<Fst<Arc>> ifst_; // Reversed PDT. VectorFst<Arc> rfst_; // Keep parentheses in ofst? const bool keep_parentheses_; // State table for efst_. StateTable state_table_; // Stack trie. Stack stack_; // Expanded PDT. PdtExpandFst<Arc> efst_; // Length of stack for given stack ID. std::vector<StackId> stack_length_; // Distance from initial state in efst_/ofst. std::vector<Weight> distance_; // Distance to final states in efst_/ofst. std::vector<Weight> fdistance_; // Queue used to visit efst_. ShortestStackFirstQueue queue_; // Construction time failure? bool error_; // Status flags for states in efst_/ofst. std::vector<uint8_t> flags_; // PDT source state for each expanded state. std::vector<StateId> sources_; // Shortest path for rfst_. std::unique_ptr<PdtShortestPath<Arc, FifoQueue<StateId>>> reverse_shortest_path_; std::unique_ptr<internal::PdtBalanceData<Arc>> balance_data_; // Maps open paren arcs to balancing close paren arcs. typename PdtShortestPath<Arc, FifoQueue<StateId>>::CloseParenMultimap close_paren_multimap_; MutableFst<Arc> *ofst_; // Output FST. Weight limit_; // Weight limit. // Maps a state s in ifst (i.e., the source of a close parenthesis matching // the top of current_stack_id_) to final states in efst_. std::unordered_map<StateId, Weight> dest_map_; // Stack ID of the states currently at the top of the queue, i.e., the states // currently being popped and processed. StackId current_stack_id_; std::ptrdiff_t current_paren_id_; // Paren ID at top of current stack. std::ptrdiff_t cached_stack_id_; StateId cached_source_; // The set of pairs of destination states and weights to final states for the // source state cached_source_ and the paren ID cached_paren_id_; i.e., the // set of source states of a close parenthesis with paren ID cached_paren_id_ // balancing an incoming open parenthesis with paren ID cached_paren_id_ in // state cached_source_.
std::forward_list<std::pair<StateId, Weight>> cached_dest_list_; NaturalLess<Weight> less_; }; // Initializes the close paren multimap, mapping pairs (s, paren_id) to all the // arcs out of s labeled with the close parenthesis for paren_id. template <class Arc> void PdtPrunedExpand<Arc>::InitCloseParenMultimap( const std::vector<std::pair<Label, Label>> &parens) { std::unordered_map<Label, Label> paren_map; for (size_t i = 0; i < parens.size(); ++i) { const auto &pair = parens[i]; paren_map[pair.first] = i; paren_map[pair.second] = i; } for (StateIterator<Fst<Arc>> siter(*ifst_); !siter.Done(); siter.Next()) { const auto s = siter.Value(); for (ArcIterator<Fst<Arc>> aiter(*ifst_, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); const auto it = paren_map.find(arc.ilabel); if (it == paren_map.end()) continue; if (arc.ilabel == parens[it->second].second) { // Close paren. const internal::ParenState<Arc> key(it->second, s); close_paren_multimap_.emplace(key, arc); } } } } // Returns the weight of the shortest balanced path from source to dest // in ifst_; dest must be the source state of a close paren arc. template <class Arc> typename Arc::Weight PdtPrunedExpand<Arc>::DistanceToDest(StateId source, StateId dest) const { using SearchState = typename PdtShortestPath<Arc, FifoQueue<StateId>>::SearchState; const SearchState ss(source + 1, dest + 1); const auto distance = reverse_shortest_path_->GetShortestPathData().Distance(ss); VLOG(2) << "D(" << source << ", " << dest << ") =" << distance; return distance; } // Returns the flags for state s in ofst_. template <class Arc> uint8_t PdtPrunedExpand<Arc>::Flags(StateId s) const { return s < flags_.size() ? flags_[s] : 0; } // Modifies the flags for state s in ofst_. template <class Arc> void PdtPrunedExpand<Arc>::SetFlags(StateId s, uint8_t flags, uint8_t mask) { while (flags_.size() <= s) flags_.push_back(0); flags_[s] &= ~mask; flags_[s] |= flags & mask; } // Returns the shortest distance from the initial state to s in ofst_. template <class Arc> typename Arc::Weight PdtPrunedExpand<Arc>::Distance(StateId s) const { return s < distance_.size() ? distance_[s] : Weight::Zero(); } // Sets the shortest distance from the initial state to s in ofst_. template <class Arc> void PdtPrunedExpand<Arc>::SetDistance(StateId s, Weight weight) { while (distance_.size() <= s) distance_.push_back(Weight::Zero()); distance_[s] = std::move(weight); } // Returns the shortest distance from s to the final states in ofst_. template <class Arc> typename Arc::Weight PdtPrunedExpand<Arc>::FinalDistance(StateId s) const { return s < fdistance_.size() ? fdistance_[s] : Weight::Zero(); } // Sets the shortest distance from s to the final states in ofst_. template <class Arc> void PdtPrunedExpand<Arc>::SetFinalDistance(StateId s, Weight weight) { while (fdistance_.size() <= s) fdistance_.push_back(Weight::Zero()); fdistance_[s] = std::move(weight); } // Returns the PDT source state of state s in ofst_. template <class Arc> typename Arc::StateId PdtPrunedExpand<Arc>::SourceState(StateId s) const { return s < sources_.size() ? sources_[s] : kNoStateId; } // Sets the PDT source state of state s in ofst_ to state p in ifst_. template <class Arc> void PdtPrunedExpand<Arc>::SetSourceState(StateId s, StateId p) { while (sources_.size() <= s) sources_.push_back(kNoStateId); sources_[s] = p; } // Adds state s of efst_ to ofst_ and inserts it in the queue, modifying the // flags for s accordingly.
template <class Arc>
void PdtPrunedExpand<Arc>::AddStateAndEnqueue(StateId s) {
  if (!(Flags(s) & (kEnqueued | kExpanded))) {
    while (ofst_->NumStates() <= s) ofst_->AddState();
    queue_.Enqueue(s);
    SetFlags(s, kEnqueued, kEnqueued);
  } else if (Flags(s) & kEnqueued) {
    queue_.Update(s);
  }
  // TODO(allauzen): Check everything is fine when kExpanded?
}

// Relaxes the arc out of state s in ofst_ as follows:
//
// 1. If the distance to s times the weight of arc is smaller than
//    the currently stored distance for arc.nextstate, updates
//    Distance(arc.nextstate) with the new estimate.
// 2. If fd is less than the currently stored distance from arc.nextstate to
//    the final states, updates FinalDistance(arc.nextstate) with the new
//    estimate.
template <class Arc>
void PdtPrunedExpand<Arc>::Relax(StateId s, const Arc &arc, Weight fd) {
  const auto nd = Times(Distance(s), arc.weight);
  if (less_(nd, Distance(arc.nextstate))) {
    SetDistance(arc.nextstate, nd);
    SetSourceState(arc.nextstate, SourceState(s));
  }
  if (less_(fd, FinalDistance(arc.nextstate))) {
    SetFinalDistance(arc.nextstate, fd);
  }
  VLOG(2) << "Relax: " << s << ", d[s] = " << Distance(s) << ", to "
          << arc.nextstate << ", d[ns] = " << Distance(arc.nextstate)
          << ", nd = " << nd;
}

// Returns whether the arc out of state s in efst_ needs to be pruned.
template <class Arc>
bool PdtPrunedExpand<Arc>::PruneArc(StateId s, const Arc &arc) {
  VLOG(2) << "Prune ?";
  auto fd = Weight::Zero();
  if ((cached_source_ != SourceState(s)) ||
      (cached_stack_id_ != current_stack_id_)) {
    cached_source_ = SourceState(s);
    cached_stack_id_ = current_stack_id_;
    cached_dest_list_.clear();
    if (cached_source_ != ifst_->Start()) {
      for (auto set_iter =
               balance_data_->Find(current_paren_id_, cached_source_);
           !set_iter.Done(); set_iter.Next()) {
        auto dest = set_iter.Element();
        const auto it = dest_map_.find(dest);
        cached_dest_list_.push_front(*it);
      }
    } else {
      // TODO(allauzen): queue discipline should prevent this from ever
      // happening. Replace by a check.
      cached_dest_list_.push_front(
          std::make_pair(rfst_.Start() - 1, Weight::One()));
    }
  }
  for (auto it = cached_dest_list_.begin(); it != cached_dest_list_.end();
       ++it) {
    const auto d =
        DistanceToDest(state_table_.Tuple(arc.nextstate).state_id, it->first);
    fd = Plus(fd, Times(d, it->second));
  }
  Relax(s, arc, fd);
  return less_(limit_, Times(Distance(s), Times(arc.weight, fd)));
}

// Adds the start state of efst_ to ofst_, enqueues it, and initializes the
// distance data structures.
template <class Arc>
void PdtPrunedExpand<Arc>::ProcStart() {
  const auto s = efst_.Start();
  AddStateAndEnqueue(s);
  ofst_->SetStart(s);
  SetSourceState(s, ifst_->Start());
  current_stack_id_ = 0;
  current_paren_id_ = -1;
  stack_length_.push_back(0);
  const auto r = rfst_.Start() - 1;
  cached_source_ = ifst_->Start();
  cached_stack_id_ = 0;
  cached_dest_list_.push_front(std::make_pair(r, Weight::One()));
  const PdtStateTuple<StateId, StackId> tuple(r, 0);
  SetFinalDistance(state_table_.FindState(tuple), Weight::One());
  SetDistance(s, Weight::One());
  const auto d = DistanceToDest(ifst_->Start(), r);
  SetFinalDistance(s, d);
  VLOG(2) << d;
}

// Makes s final in ofst_ if the shortest accepting path ending in s is below
// the threshold.
template <class Arc>
void PdtPrunedExpand<Arc>::ProcFinal(StateId s) {
  const auto weight = efst_.Final(s);
  if (weight == Weight::Zero()) return;
  if (less_(limit_, Times(Distance(s), weight))) return;
  ofst_->SetFinal(s, weight);
}

// Returns true when an arc (or meta-arc) leaving state s in efst_ is below the
// threshold. When add_arc is true, the arc is added to ofst_.
template <class Arc>
bool PdtPrunedExpand<Arc>::ProcNonParen(StateId s, const Arc &arc,
                                        bool add_arc) {
  VLOG(2) << "ProcNonParen: " << s << " to " << arc.nextstate << ", "
          << arc.ilabel << ":" << arc.olabel << " / " << arc.weight
          << ", add_arc = " << (add_arc ? "true" : "false");
  if (PruneArc(s, arc)) return false;
  if (add_arc) ofst_->AddArc(s, arc);
  AddStateAndEnqueue(arc.nextstate);
  return true;
}

// Processes an open paren arc leaving state s in ofst_. When the arc is
// labeled with an open paren:
//
// 1. Considers each (shortest) balanced path starting in s by taking the arc
//    and ending with a close paren balancing the open paren of arc as a
//    meta-arc, processing and pruning each meta-arc as a non-paren arc and
//    inserting its destination into the queue;
// 2. If at least one of these meta-arcs has not been pruned, adds the
//    destination of arc to ofst_ as a new source state for the stack ID nsi,
//    and inserts it in the queue.
template <class Arc>
bool PdtPrunedExpand<Arc>::ProcOpenParen(StateId s, const Arc &arc, StackId si,
                                         StackId nsi) {
  // Updates the stack length when needed.
  while (stack_length_.size() <= nsi) stack_length_.push_back(-1);
  if (stack_length_[nsi] == -1) stack_length_[nsi] = stack_length_[si] + 1;
  const auto ns = arc.nextstate;
  VLOG(2) << "Open paren: " << s << "(" << state_table_.Tuple(s).state_id
          << ") to " << ns << "(" << state_table_.Tuple(ns).state_id << ")";
  bool proc_arc = false;
  auto fd = Weight::Zero();
  const auto paren_id = stack_.ParenId(arc.ilabel);
  std::forward_list<StateId> sources;
  for (auto set_iter =
           balance_data_->Find(paren_id, state_table_.Tuple(ns).state_id);
       !set_iter.Done(); set_iter.Next()) {
    sources.push_front(set_iter.Element());
  }
  for (const auto source : sources) {
    VLOG(2) << "Close paren source: " << source;
    const internal::ParenState<Arc> paren_state(paren_id, source);
    for (auto it = close_paren_multimap_.find(paren_state);
         it != close_paren_multimap_.end() && paren_state == it->first; ++it) {
      auto meta_arc = it->second;
      const PdtStateTuple<StateId, StackId> tuple(meta_arc.nextstate, si);
      meta_arc.nextstate = state_table_.FindState(tuple);
      const auto state_id = state_table_.Tuple(ns).state_id;
      const auto d = DistanceToDest(state_id, source);
      VLOG(2) << state_id << ", " << source;
      VLOG(2) << "Meta arc weight = " << arc.weight << " Times " << d
              << " Times " << meta_arc.weight;
      meta_arc.weight = Times(arc.weight, Times(d, meta_arc.weight));
      proc_arc |= ProcNonParen(s, meta_arc, false);
      fd = Plus(
          fd,
          Times(Times(DistanceToDest(state_table_.Tuple(ns).state_id, source),
                      it->second.weight),
                FinalDistance(meta_arc.nextstate)));
    }
  }
  if (proc_arc) {
    VLOG(2) << "Proc open paren " << s << " to " << arc.nextstate;
    ofst_->AddArc(
        s, keep_parentheses_ ? arc : Arc(0, 0, arc.weight, arc.nextstate));
    AddStateAndEnqueue(arc.nextstate);
    const auto nd = Times(Distance(s), arc.weight);
    if (less_(nd, Distance(arc.nextstate))) SetDistance(arc.nextstate, nd);
    // FinalDistance not necessary for source state since pruning decided using
    // meta-arcs above. But this is a problem with A*, hence the following.
    if (less_(fd, FinalDistance(arc.nextstate)))
      SetFinalDistance(arc.nextstate, fd);
    SetFlags(arc.nextstate, kSourceState, kSourceState);
  }
  return proc_arc;
}

// Checks that the shortest path through the close paren arc in efst_ is below
// the threshold, and if so, adds it to ofst_.
template <class Arc> bool PdtPrunedExpand<Arc>::ProcCloseParen(StateId s, const Arc &arc) { const auto weight = Times(Distance(s), Times(arc.weight, FinalDistance(arc.nextstate))); if (less_(limit_, weight)) return false; ofst_->AddArc(s, keep_parentheses_ ? arc : Arc(0, 0, arc.weight, arc.nextstate)); return true; } // When state s in ofst_ is a source state for stack ID si, identifies all the // corresponding possible destination states, that is, all the states in ifst_ // that have an outgoing close paren arc balancing the incoming open paren taken // to get to s. For each such state t, computes the shortest distance from (t, // si) to the final states in ofst_. Stores this information in dest_map_. template <class Arc> void PdtPrunedExpand<Arc>::ProcDestStates(StateId s, StackId si) { if (!(Flags(s) & kSourceState)) return; if (si != current_stack_id_) { dest_map_.clear(); current_stack_id_ = si; current_paren_id_ = stack_.Top(current_stack_id_); VLOG(2) << "StackID " << si << " dequeued for first time"; } // TODO(allauzen): clean up source state business; rename current function to // ProcSourceState. SetSourceState(s, state_table_.Tuple(s).state_id); const auto paren_id = stack_.Top(si); for (auto set_iter = balance_data_->Find(paren_id, state_table_.Tuple(s).state_id); !set_iter.Done(); set_iter.Next()) { const auto dest_state = set_iter.Element(); if (dest_map_.find(dest_state) != dest_map_.end()) continue; auto dest_weight = Weight::Zero(); internal::ParenState<Arc> paren_state(paren_id, dest_state); for (auto it = close_paren_multimap_.find(paren_state); it != close_paren_multimap_.end() && paren_state == it->first; ++it) { const auto &arc = it->second; const PdtStateTuple<StateId, StackId> tuple(arc.nextstate, stack_.Pop(si)); dest_weight = Plus(dest_weight, Times(arc.weight, FinalDistance(state_table_.FindState(tuple)))); } dest_map_[dest_state] = dest_weight; VLOG(2) << "State " << dest_state << " is a dest state for stack ID " << si << " with weight " << dest_weight; } } // Expands and prunes the input PDT, writing the result in ofst. template <class Arc> void PdtPrunedExpand<Arc>::Expand(MutableFst<Arc> *ofst, const typename Arc::Weight &threshold) { ofst_ = ofst; if (error_) { ofst_->SetProperties(kError, kError); return; } ofst_->DeleteStates(); ofst_->SetInputSymbols(ifst_->InputSymbols()); ofst_->SetOutputSymbols(ifst_->OutputSymbols()); limit_ = Times(DistanceToDest(ifst_->Start(), rfst_.Start() - 1), threshold); flags_.clear(); ProcStart(); while (!queue_.Empty()) { const auto s = queue_.Head(); queue_.Dequeue(); SetFlags(s, kExpanded, kExpanded | kEnqueued); VLOG(2) << s << " dequeued!"; ProcFinal(s); StackId stack_id = state_table_.Tuple(s).stack_id; ProcDestStates(s, stack_id); for (ArcIterator<PdtExpandFst<Arc>> aiter(efst_, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); const auto nextstack_id = state_table_.Tuple(arc.nextstate).stack_id; if (stack_id == nextstack_id) { ProcNonParen(s, arc, true); } else if (stack_id == stack_.Pop(nextstack_id)) { ProcOpenParen(s, arc, stack_id, nextstack_id); } else { ProcCloseParen(s, arc); } } VLOG(2) << "d[" << s << "] = " << Distance(s) << ", fd[" << s << "] = " << FinalDistance(s); } } // Expand functions. 
template <class Arc>
struct PdtExpandOptions {
  using Weight = typename Arc::Weight;

  bool connect;
  bool keep_parentheses;
  Weight weight_threshold;

  PdtExpandOptions(bool connect = true, bool keep_parentheses = false,
                   Weight weight_threshold = Weight::Zero())
      : connect(connect),
        keep_parentheses(keep_parentheses),
        weight_threshold(std::move(weight_threshold)) {}
};

// Expands a pushdown transducer (PDT) encoded as an FST into an FST. This
// version writes the expanded PDT to a mutable FST. In the PDT, some
// transitions are labeled with open or close parentheses. To be interpreted as
// a PDT, the parens must balance on a path. The open-close parenthesis label
// pairs are passed using the parens argument. Expansion enforces the
// parenthesis constraints. The PDT must be expandable as an FST.
template <class Arc>
void Expand(
    const Fst<Arc> &ifst,
    const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
        &parens,
    MutableFst<Arc> *ofst, const PdtExpandOptions<Arc> &opts) {
  PdtExpandFstOptions<Arc> eopts;
  eopts.gc_limit = 0;
  if (opts.weight_threshold == Arc::Weight::Zero()) {
    eopts.keep_parentheses = opts.keep_parentheses;
    *ofst = PdtExpandFst<Arc>(ifst, parens, eopts);
  } else {
    PdtPrunedExpand<Arc> pruned_expand(ifst, parens, opts.keep_parentheses);
    pruned_expand.Expand(ofst, opts.weight_threshold);
  }
  if (opts.connect) Connect(ofst);
}

// Expands a pushdown transducer (PDT) encoded as an FST into an FST. This
// version writes the expanded PDT result to a mutable FST. In the PDT, some
// transitions are labeled with open or close parentheses. To be interpreted as
// a PDT, the parens must balance on a path. The open-close parenthesis label
// pairs are passed using the parens argument. Expansion enforces the
// parenthesis constraints. The PDT must be expandable as an FST.
template <class Arc>
void Expand(const Fst<Arc> &ifst,
            const std::vector<std::pair<typename Arc::Label,
                                        typename Arc::Label>> &parens,
            MutableFst<Arc> *ofst, bool connect = true,
            bool keep_parentheses = false) {
  const PdtExpandOptions<Arc> opts(connect, keep_parentheses);
  Expand(ifst, parens, ofst, opts);
}

}  // namespace fst

#endif  // FST_EXTENSIONS_PDT_EXPAND_H_
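A quick illustration of the Expand API above may help; the following is a minimal sketch under stated assumptions (StdArc semiring, parenthesis label IDs 1/2 chosen arbitrarily), not code taken from this header.

#include <utility>
#include <vector>

#include <fst/extensions/pdt/expand.h>
#include <fst/vector-fst.h>

// Hedged sketch: expand a PDT over StdArc without pruning.
void ExpandExample(const fst::StdVectorFst &pdt) {
  // One open/close parenthesis label pair; the IDs (1, 2) are assumptions and
  // must match the labels actually used in the input PDT.
  const std::vector<std::pair<fst::StdArc::Label, fst::StdArc::Label>> parens =
      {{1, 2}};
  fst::StdVectorFst expanded;
  // A Zero() threshold selects the unpruned expansion path above.
  const fst::PdtExpandOptions<fst::StdArc> opts(
      /*connect=*/true, /*keep_parentheses=*/false,
      fst::StdArc::Weight::Zero());
  fst::Expand(pdt, parens, &expanded, opts);
}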
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions/compress/gzfile.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Resource handles for gzip files written to or read from stringstreams. These
// are necessary to provide the compression routines with streams reading from
// or writing to compressed files (or the UNIX standard streams), and are not
// intended for general use.

#ifndef FST_EXTENSIONS_COMPRESS_GZFILE_H_
#define FST_EXTENSIONS_COMPRESS_GZFILE_H_

#include <iostream>
#include <memory>
#include <sstream>
#include <string>

#include <fst/compat.h>
#include <fst/fst.h>
#include <zlib.h>

using std::stringstream;
using std::unique_ptr;

namespace fst {

// Gives the zlib gzFile type an OO-like interface. String inputs are all
// C-style strings. The caller is responsible for choosing file modes
// appropriate for the IO methods being called, and for opening the file
// descriptor if that constructor is used. The ! operator can be used to check
// for errors after construction or reading/writing.
class GzFile {
 public:
  GzFile(const char *filename, const char *mode)
      : gzfile_(gzopen(filename, mode)), error_(check_handle()) {}

  // The caller is responsible for ensuring the corresponding FD is open and
  // has the needed modes ("r" for reading, "w" or "a" for writing).
  explicit GzFile(const int fd, const char *mode)
      : gzfile_(gzdopen(fd, mode)), error_(check_handle()), close_me_(false) {}

  // If the instance was constructed from an FD, flush the buffer; otherwise,
  // close the file, which flushes the buffer as a side-effect.
  ~GzFile() { close_me_ ? gzclose(gzfile_) : gzflush(gzfile_, Z_FINISH); }

  inline bool operator!() const { return error_; }

  // Returns the number of bytes read; sets error if a short read does not
  // reach EOF.
  int read(void *buf, unsigned int size) {
    int bytes_read = gzread(gzfile_, buf, size);
    if ((bytes_read < size) && !gzeof(gzfile_)) error_ = true;
    return bytes_read;
  }

  // Sets error on short writes.
  void write(const char *buf, unsigned int size) {
    if (gzwrite(gzfile_, buf, size) != size) error_ = true;
  }

 private:
  // gzopen and gzdopen signal failure by returning null.
  bool check_handle() { return gzfile_ == nullptr; }

  gzFile gzfile_ = nullptr;
  bool error_ = false;
  bool close_me_ = false;
};

// Resource handle for writing a stringstream to a GzFile.
class OGzFile {
 public:
  explicit OGzFile(const string &filename) : OGzFile(filename.c_str()) {}

  explicit OGzFile(const char *filename) : gz_(GzFile(filename, mode_)) {}

  explicit OGzFile(const int fd) : gz_(GzFile(fd, mode_)) {}

  inline bool operator!() const { return !gz_; }

  void write(const stringstream &ssbuf) {
    string sbuf = ssbuf.str();
    gz_.write(sbuf.data(), sbuf.size());
  }

 private:
  GzFile gz_;
  static constexpr auto &mode_ = "wb";
};

// Resource handle for reading a stringstream from a GzFile.
class IGzFile {
 public:
  explicit IGzFile(const string &filename) : IGzFile(filename.c_str()) {}

  explicit IGzFile(const char *filename) : gz_(GzFile(filename, mode_)) {}

  explicit IGzFile(const int fd) : gz_(GzFile(fd, mode_)) {}

  inline bool operator!() const { return !gz_; }

  // This is a great case for "move", but GCC 4 is missing the C++11 standard
  // move constructor for stringstream, so a unique_ptr is the next best thing.
  unique_ptr<stringstream> read() {
    char buf[bufsize_];
    unique_ptr<stringstream> sstrm(new stringstream);
    // We always read at least once, and the result of the last read is always
    // pushed onto the stringstream. We use the "write" member because << onto
    // a stringstream stops at the null byte, which might be data!
int bytes_read; while ((bytes_read = gz_.read(buf, bufsize_)) == bufsize_) sstrm->write(buf, bufsize_); sstrm->write(buf, bytes_read); return sstrm; } private: GzFile gz_; static constexpr auto &mode_ = "rb"; // This is the same size as the default internal buffer for zlib. static const size_t bufsize_ = 8192; }; } // namespace fst #endif // FST_EXTENSIONS_COMPRESS_GZFILE_H_
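To make the intended flow of these handles concrete, here is a small hedged round-trip sketch; the scratch path is an assumption and error handling is minimal.

#include <iostream>
#include <memory>
#include <sstream>

#include <fst/extensions/compress/gzfile.h>

int main() {
  {
    std::stringstream buf;
    buf << "hello gzfile";
    fst::OGzFile ogz("/tmp/example.gz");  // assumed scratch path
    if (!ogz) return 1;                   // gzopen failed
    ogz.write(buf);
  }  // ~GzFile closes the handle here, flushing the compressed data.
  fst::IGzFile igz("/tmp/example.gz");
  if (!igz) return 1;
  std::unique_ptr<std::stringstream> contents = igz.read();
  std::cout << contents->str() << std::endl;  // prints "hello gzfile"
  return 0;
}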
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/connect.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes and functions to remove unsuccessful paths from an FST.

#ifndef FST_CONNECT_H_
#define FST_CONNECT_H_

#include <vector>

#include <fst/dfs-visit.h>
#include <fst/mutable-fst.h>
#include <fst/union-find.h>

namespace fst {

// Finds and returns connected components. Use with Visit().
template <class Arc>
class CcVisitor {
 public:
  using Weight = typename Arc::Weight;
  using StateId = typename Arc::StateId;

  // cc[i]: connected component number for state i.
  explicit CcVisitor(std::vector<StateId> *cc)
      : comps_(new UnionFind<StateId>(0, kNoStateId)), cc_(cc), nstates_(0) {}

  // comps: connected components equivalence classes.
  explicit CcVisitor(UnionFind<StateId> *comps)
      : comps_(comps), cc_(nullptr), nstates_(0) {}

  ~CcVisitor() {
    if (cc_) delete comps_;
  }

  void InitVisit(const Fst<Arc> &fst) {}

  bool InitState(StateId s, StateId root) {
    ++nstates_;
    if (comps_->FindSet(s) == kNoStateId) comps_->MakeSet(s);
    return true;
  }

  bool WhiteArc(StateId s, const Arc &arc) {
    comps_->MakeSet(arc.nextstate);
    comps_->Union(s, arc.nextstate);
    return true;
  }

  bool GreyArc(StateId s, const Arc &arc) {
    comps_->Union(s, arc.nextstate);
    return true;
  }

  bool BlackArc(StateId s, const Arc &arc) {
    comps_->Union(s, arc.nextstate);
    return true;
  }

  void FinishState(StateId s) {}

  void FinishVisit() {
    if (cc_) GetCcVector(cc_);
  }

  // Returns the number of components.
  // cc[i]: connected component number for state i.
  int GetCcVector(std::vector<StateId> *cc) {
    cc->clear();
    cc->resize(nstates_, kNoStateId);
    StateId ncomp = 0;
    for (StateId s = 0; s < nstates_; ++s) {
      const auto rep = comps_->FindSet(s);
      auto &comp = (*cc)[rep];
      if (comp == kNoStateId) {
        comp = ncomp;
        ++ncomp;
      }
      (*cc)[s] = comp;
    }
    return ncomp;
  }

 private:
  UnionFind<StateId> *comps_;  // Components.
  std::vector<StateId> *cc_;   // State's cc number.
  StateId nstates_;            // State count.
};

// Finds and returns strongly-connected components, accessible and
// coaccessible states and related properties. Uses Tarjan's single
// DFS SCC algorithm (see Aho et al., "Design and Analysis of Computer
// Algorithms", 189pp). Use with DfsVisit().
template <class Arc>
class SccVisitor {
 public:
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  // scc[i]: strongly-connected component number for state i.
  // SCC numbers will be in topological order for acyclic input.
  // access[i]: accessibility of state i.
  // coaccess[i]: coaccessibility of state i.
  // Any of the above can be NULL.
  // props: related property bits (cyclicity, initial cyclicity,
  // accessibility, coaccessibility) set/cleared (o.w. unchanged).
SccVisitor(std::vector<StateId> *scc, std::vector<bool> *access, std::vector<bool> *coaccess, uint64 *props) : scc_(scc), access_(access), coaccess_(coaccess), props_(props) {} explicit SccVisitor(uint64 *props) : scc_(nullptr), access_(nullptr), coaccess_(nullptr), props_(props) {} void InitVisit(const Fst<Arc> &fst); bool InitState(StateId s, StateId root); bool TreeArc(StateId s, const Arc &arc) { return true; } bool BackArc(StateId s, const Arc &arc) { const auto t = arc.nextstate; if ((*dfnumber_)[t] < (*lowlink_)[s]) (*lowlink_)[s] = (*dfnumber_)[t]; if ((*coaccess_)[t]) (*coaccess_)[s] = true; *props_ |= kCyclic; *props_ &= ~kAcyclic; if (t == start_) { *props_ |= kInitialCyclic; *props_ &= ~kInitialAcyclic; } return true; } bool ForwardOrCrossArc(StateId s, const Arc &arc) { const auto t = arc.nextstate; if ((*dfnumber_)[t] < (*dfnumber_)[s] /* cross edge */ && (*onstack_)[t] && (*dfnumber_)[t] < (*lowlink_)[s]) { (*lowlink_)[s] = (*dfnumber_)[t]; } if ((*coaccess_)[t]) (*coaccess_)[s] = true; return true; } // Last argument always ignored, but required by the interface. void FinishState(StateId state, StateId p, const Arc *); void FinishVisit() { // Numbers SCCs in topological order when acyclic. if (scc_) { for (StateId s = 0; s < scc_->size(); ++s) { (*scc_)[s] = nscc_ - 1 - (*scc_)[s]; } } if (coaccess_internal_) delete coaccess_; dfnumber_.reset(); lowlink_.reset(); onstack_.reset(); scc_stack_.reset(); } private: std::vector<StateId> *scc_; // State's scc number. std::vector<bool> *access_; // State's accessibility. std::vector<bool> *coaccess_; // State's coaccessibility. uint64 *props_; const Fst<Arc> *fst_; StateId start_; StateId nstates_; // State count. StateId nscc_; // SCC count. bool coaccess_internal_; std::unique_ptr<std::vector<StateId>> dfnumber_; // State discovery times. std::unique_ptr<std::vector<StateId>> lowlink_; // lowlink[state] == dfnumber[state] => SCC root std::unique_ptr<std::vector<bool>> onstack_; // Is a state on the SCC stack? std::unique_ptr<std::vector<StateId>> scc_stack_; // SCC stack, with random access. 
}; template <class Arc> inline void SccVisitor<Arc>::InitVisit(const Fst<Arc> &fst) { if (scc_) scc_->clear(); if (access_) access_->clear(); if (coaccess_) { coaccess_->clear(); coaccess_internal_ = false; } else { coaccess_ = new std::vector<bool>; coaccess_internal_ = true; } *props_ |= kAcyclic | kInitialAcyclic | kAccessible | kCoAccessible; *props_ &= ~(kCyclic | kInitialCyclic | kNotAccessible | kNotCoAccessible); fst_ = &fst; start_ = fst.Start(); nstates_ = 0; nscc_ = 0; dfnumber_.reset(new std::vector<StateId>()); lowlink_.reset(new std::vector<StateId>()); onstack_.reset(new std::vector<bool>()); scc_stack_.reset(new std::vector<StateId>()); } template <class Arc> inline bool SccVisitor<Arc>::InitState(StateId s, StateId root) { scc_stack_->push_back(s); while (dfnumber_->size() <= s) { if (scc_) scc_->push_back(-1); if (access_) access_->push_back(false); coaccess_->push_back(false); dfnumber_->push_back(-1); lowlink_->push_back(-1); onstack_->push_back(false); } (*dfnumber_)[s] = nstates_; (*lowlink_)[s] = nstates_; (*onstack_)[s] = true; if (root == start_) { if (access_) (*access_)[s] = true; } else { if (access_) (*access_)[s] = false; *props_ |= kNotAccessible; *props_ &= ~kAccessible; } ++nstates_; return true; } template <class Arc> inline void SccVisitor<Arc>::FinishState(StateId s, StateId p, const Arc *) { if (fst_->Final(s) != Weight::Zero()) (*coaccess_)[s] = true; if ((*dfnumber_)[s] == (*lowlink_)[s]) { // Root of new SCC. bool scc_coaccess = false; auto i = scc_stack_->size(); StateId t; do { t = (*scc_stack_)[--i]; if ((*coaccess_)[t]) scc_coaccess = true; } while (s != t); do { t = scc_stack_->back(); if (scc_) (*scc_)[t] = nscc_; if (scc_coaccess) (*coaccess_)[t] = true; (*onstack_)[t] = false; scc_stack_->pop_back(); } while (s != t); if (!scc_coaccess) { *props_ |= kNotCoAccessible; *props_ &= ~kCoAccessible; } ++nscc_; } if (p != kNoStateId) { if ((*coaccess_)[s]) (*coaccess_)[p] = true; if ((*lowlink_)[s] < (*lowlink_)[p]) (*lowlink_)[p] = (*lowlink_)[s]; } } // Trims an FST, removing states and arcs that are not on successful paths. // This version modifies its input. // // Complexity: // // Time: O(V + E) // Space: O(V + E) // // where V = # of states and E = # of arcs. template <class Arc> void Connect(MutableFst<Arc> *fst) { using StateId = typename Arc::StateId; std::vector<bool> access; std::vector<bool> coaccess; uint64 props = 0; SccVisitor<Arc> scc_visitor(nullptr, &access, &coaccess, &props); DfsVisit(*fst, &scc_visitor); std::vector<StateId> dstates; for (StateId s = 0; s < access.size(); ++s) { if (!access[s] || !coaccess[s]) dstates.push_back(s); } fst->DeleteStates(dstates); fst->SetProperties(kAccessible | kCoAccessible, kAccessible | kCoAccessible); } // Returns an acyclic FST where each SCC in the input FST has been condensed to // a single state with transitions between SCCs retained and within SCCs // dropped. Also populates 'scc' with a mapping from input to output states. 
template <class Arc> void Condense(const Fst<Arc> &ifst, MutableFst<Arc> *ofst, std::vector<typename Arc::StateId> *scc) { using StateId = typename Arc::StateId; ofst->DeleteStates(); uint64 props = 0; SccVisitor<Arc> scc_visitor(scc, nullptr, nullptr, &props); DfsVisit(ifst, &scc_visitor); for (StateId s = 0; s < scc->size(); ++s) { const auto c = (*scc)[s]; while (c >= ofst->NumStates()) ofst->AddState(); if (s == ifst.Start()) ofst->SetStart(c); const auto weight = ifst.Final(s); if (weight != Arc::Weight::Zero()) ofst->SetFinal(c, Plus(ofst->Final(c), weight)); for (ArcIterator<Fst<Arc>> aiter(ifst, s); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); const auto nextc = (*scc)[arc.nextstate]; if (nextc != c) { while (nextc >= ofst->NumStates()) ofst->AddState(); arc.nextstate = nextc; ofst->AddArc(c, arc); } } } ofst->SetProperties(kAcyclic | kInitialAcyclic, kAcyclic | kInitialAcyclic); } } // namespace fst #endif // FST_CONNECT_H_
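A short hedged sketch of trimming with Connect; the labels and weights below are illustrative, not values from this header.

#include <fst/connect.h>
#include <fst/vector-fst.h>

int main() {
  fst::StdVectorFst f;
  const auto s0 = f.AddState();
  const auto s1 = f.AddState();
  const auto s2 = f.AddState();  // dead end: no path to a final state
  f.SetStart(s0);
  f.AddArc(s0, fst::StdArc(1, 1, 0.5, s1));
  f.AddArc(s0, fst::StdArc(2, 2, 0.5, s2));
  f.SetFinal(s1, fst::StdArc::Weight::One());
  fst::Connect(&f);  // removes s2, which is not coaccessible
  return f.NumStates() == 2 ? 0 : 1;
}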
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_12x_8k_tflite-linux-amd64-prod-opt.yml
build: template_file: test-linux-opt-base.tyml docker_image: "ubuntu:16.04" dependencies: - "linux-amd64-tflite-opt" system_setup: > ${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node_tflite-tests-prod.sh 12.x 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 TFLite NodeJS 12.x prod tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v12.x on prod model, TFLite, optimized version (8kHz)"
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/tensorflow/mfcc.cc
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <math.h> #include "mfcc.h" namespace tensorflow { const double kDefaultUpperFrequencyLimit = 4000; const double kDefaultLowerFrequencyLimit = 20; const double kFilterbankFloor = 1e-12; const int kDefaultFilterbankChannelCount = 40; const int kDefaultDCTCoefficientCount = 13; Mfcc::Mfcc() : initialized_(false), lower_frequency_limit_(kDefaultLowerFrequencyLimit), upper_frequency_limit_(kDefaultUpperFrequencyLimit), filterbank_channel_count_(kDefaultFilterbankChannelCount), dct_coefficient_count_(kDefaultDCTCoefficientCount) {} bool Mfcc::Initialize(int input_length, double input_sample_rate) { bool initialized = mel_filterbank_.Initialize( input_length, input_sample_rate, filterbank_channel_count_, lower_frequency_limit_, upper_frequency_limit_); initialized &= dct_.Initialize(filterbank_channel_count_, dct_coefficient_count_); initialized_ = initialized; return initialized; } void Mfcc::Compute(const std::vector<double>& spectrogram_frame, std::vector<double>* output) const { if (!initialized_) { // LOG(ERROR) << "Mfcc not initialized."; return; } std::vector<double> working; mel_filterbank_.Compute(spectrogram_frame, &working); for (int i = 0; i < working.size(); ++i) { double val = working[i]; if (val < kFilterbankFloor) { val = kFilterbankFloor; } working[i] = log(val); } dct_.Compute(working, output); } } // namespace tensorflow
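A hedged usage sketch for the Mfcc class above; the frame length (257 bins, i.e., a 512-point FFT magnitude spectrum) and the 16 kHz sample rate are assumptions, not requirements of this file.

#include <vector>

#include "mfcc.h"

int main() {
  tensorflow::Mfcc mfcc;
  // Initialize with the spectrogram frame length and input sample rate.
  if (!mfcc.Initialize(/*input_length=*/257, /*input_sample_rate=*/16000.0)) {
    return 1;
  }
  std::vector<double> frame(257, 1.0);  // placeholder spectrogram frame
  std::vector<double> coeffs;
  mfcc.Compute(frame, &coeffs);  // yields 13 DCT coefficients by default
  return coeffs.size() == 13 ? 0 : 1;
}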
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/script/verify.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_VERIFY_H_ #define FST_SCRIPT_VERIFY_H_ #include <fst/verify.h> #include <fst/script/arg-packs.h> #include <fst/script/fst-class.h> namespace fst { namespace script { using VerifyArgs = WithReturnValue<bool, const FstClass &>; template <class Arc> void Verify(VerifyArgs *args) { const Fst<Arc> &fst = *(args->args.GetFst<Arc>()); args->retval = Verify(fst); } bool Verify(const FstClass &fst); } // namespace script } // namespace fst #endif // FST_SCRIPT_VERIFY_H_
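A hedged usage sketch for this scripting wrapper; the FST path is an assumption.

#include <memory>

#include <fst/script/fst-class.h>
#include <fst/script/verify.h>

int main() {
  // FstClass::Read returns nullptr on failure; "example.fst" is assumed.
  std::unique_ptr<fst::script::FstClass> f(
      fst::script::FstClass::Read("example.fst"));
  if (!f) return 1;
  // Verify checks the sanity of the FST's contents.
  return fst::script::Verify(*f) ? 0 : 1;
}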
0
coqui_public_repos/TTS/recipes/ljspeech
coqui_public_repos/TTS/recipes/ljspeech/align_tts/train_aligntts.py
import os

from trainer import Trainer, TrainerArgs

from TTS.tts.configs.align_tts_config import AlignTTSConfig
from TTS.tts.configs.shared_configs import BaseDatasetConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.models.align_tts import AlignTTS
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.utils.audio import AudioProcessor

output_path = os.path.dirname(os.path.abspath(__file__))

# init configs
dataset_config = BaseDatasetConfig(
    formatter="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/")
)
config = AlignTTSConfig(
    batch_size=32,
    eval_batch_size=16,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="english_cleaners",
    use_phonemes=False,
    phoneme_language="en-us",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
    print_step=25,
    print_eval=True,
    mixed_precision=False,
    output_path=output_path,
    datasets=[dataset_config],
)

# INITIALIZE THE AUDIO PROCESSOR
# The audio processor is used for feature extraction and audio I/O.
# It mainly serves the dataloader and the training loggers.
ap = AudioProcessor.init_from_config(config)

# INITIALIZE THE TOKENIZER
# The tokenizer is used to convert text to sequences of token IDs.
# If characters are not defined in the config, default characters are passed to the config.
tokenizer, config = TTSTokenizer.init_from_config(config)

# LOAD DATA SAMPLES
# Each sample is a list of ```[text, audio_file_path, speaker_name]```
# You can define your own sample loader returning the list of samples,
# or define a custom formatter and pass it to `load_tts_samples`.
# Check `TTS.tts.datasets.load_tts_samples` for more details.
train_samples, eval_samples = load_tts_samples(
    dataset_config,
    eval_split=True,
    eval_split_max_size=config.eval_split_max_size,
    eval_split_size=config.eval_split_size,
)

# init model
model = AlignTTS(config, ap, tokenizer)

# INITIALIZE THE TRAINER
# The Trainer provides a generic API to train all the 🐸TTS models with all its perks,
# such as mixed-precision training, distributed training, etc.
trainer = Trainer(
    TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples
)

# AND... 3,2,1... 🚀
trainer.fit()
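A note on running this recipe: it is typically launched directly with `python train_aligntts.py` from this directory (an assumption based on the standard 🐸TTS recipe layout), after downloading LJSpeech to the path given in `dataset_config`.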
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/m4/ltversion.m4
# ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # @configure_input@ # serial 3337 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.4.2]) m4_define([LT_PACKAGE_REVISION], [1.3337]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.4.2' macro_revision='1.3337' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ])
0
coqui_public_repos/STT-models/french/jaco-assistant
coqui_public_repos/STT-models/french/jaco-assistant/v0.0.1/MODEL_CARD.md
# Model card for French STT Jump to section: - [Model details](#model-details) - [Intended use](#intended-use) - [Performance Factors](#performance-factors) - [Metrics](#metrics) - [Training data](#training-data) - [Evaluation data](#evaluation-data) - [Ethical considerations](#ethical-considerations) - [Caveats and recommendations](#caveats-and-recommendations) ## Model details - Person or organization developing model: Originally trained by [DANBER](https://gitlab.com/DANBER) and released under the [Jaco-Assistant](https://gitlab.com/Jaco-Assistant) project. - Model date: Accessed from [Gitlab](https://gitlab.com/Jaco-Assistant/Scribosermo) on March 31, 2021 - Model type: `Speech-to-Text` - Model version: `v0.0.1` - Compatible with 🐸 STT version: `v0.9.3` - Code: [scribosermo](https://gitlab.com/Jaco-Assistant/Scribosermo/-/tree/master/#old-experiments) - License: GNU Lesser General Public License - Citation details: `@misc{french-jaco, author = {DANBER}, title = {French DeepSpeech for Jaco-Assistant}, publisher = {Jaco-Assistant}, journal = {Gitlab}, howpublished = {\url{https://gitlab.com/Jaco-Assistant/Scribosermo}}, commit = {dfc541d2} }` - Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/). ## Intended use Speech-to-Text for the [French Language](https://en.wikipedia.org/wiki/French_language) on 16kHz, mono-channel audio. ## Performance Factors Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). ## Metrics STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk. #### Transcription Accuracy The following Word Error Rates (WER) are reported on [Gitlab](https://gitlab.com/Jaco-Assistant/Scribosermo/-/tree/master#old-experiments). |Test Corpus|WER|CER| |-----------|---|---| |Common Voice|19.5\%|9.2\%| #### Real-Time Factor Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF. Recorded average RTF on laptop CPU: `` #### Model Size `model.pbmm`: 181M `model.tflite`: 46M ### Approaches to uncertainty and variability Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio. ## Training data This French STT model was trained on approximately 787 hours of Common Voice + CssTen + LinguaLibre + Mailabs + Tatoeba + Voxforge. [Read more about training here](https://gitlab.com/Jaco-Assistant/Scribosermo/-/tree/master#old-experiments). ## Evaluation data This French STT model was tested on approximately 25 hours of Common Voice. [Read more about testing here](https://gitlab.com/Jaco-Assistant/Scribosermo/-/tree/master#old-experiments). ## Ethical considerations Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use. ### Demographic Bias You should assume every machine learning model has demographic bias unless proven otherwise. 
For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.

### Surveillance

Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.

## Caveats and recommendations

Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/string.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Utilities to convert strings into FSTs. #ifndef FST_STRING_H_ #define FST_STRING_H_ #include <memory> #include <sstream> #include <string> #include <vector> #include <fst/flags.h> #include <fst/log.h> #include <fst/compact-fst.h> #include <fst/icu.h> #include <fst/mutable-fst.h> #include <fst/util.h> DECLARE_string(fst_field_separator); namespace fst { enum StringTokenType { SYMBOL = 1, BYTE = 2, UTF8 = 3 }; namespace internal { template <class Label> bool ConvertSymbolToLabel(const char *str, const SymbolTable *syms, Label unknown_label, bool allow_negative, Label *output) { int64_t n; if (syms) { n = syms->Find(str); if ((n == -1) && (unknown_label != kNoLabel)) n = unknown_label; if (n == -1 || (!allow_negative && n < 0)) { VLOG(1) << "ConvertSymbolToLabel: Symbol \"" << str << "\" is not mapped to any integer label, symbol table = " << syms->Name(); return false; } } else { char *p; n = strtoll(str, &p, 10); if (p < str + strlen(str) || (!allow_negative && n < 0)) { VLOG(1) << "ConvertSymbolToLabel: Bad label integer " << "= \"" << str << "\""; return false; } } *output = n; return true; } template <class Label> bool ConvertStringToLabels(const string &str, StringTokenType token_type, const SymbolTable *syms, Label unknown_label, bool allow_negative, std::vector<Label> *labels) { labels->clear(); if (token_type == StringTokenType::BYTE) { for (const char c : str) labels->push_back(c); } else if (token_type == StringTokenType::UTF8) { return UTF8StringToLabels(str, labels); } else { std::unique_ptr<char[]> c_str(new char[str.size() + 1]); str.copy(c_str.get(), str.size()); c_str[str.size()] = 0; std::vector<char *> vec; const string separator = "\n" + FLAGS_fst_field_separator; SplitString(c_str.get(), separator.c_str(), &vec, true); for (const char *c : vec) { Label label; if (!ConvertSymbolToLabel(c, syms, unknown_label, allow_negative, &label)) { return false; } labels->push_back(label); } } return true; } } // namespace internal // Functor for compiling a string in an FST. template <class Arc> class StringCompiler { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; explicit StringCompiler(StringTokenType token_type, const SymbolTable *syms = nullptr, Label unknown_label = kNoLabel, bool allow_negative = false) : token_type_(token_type), syms_(syms), unknown_label_(unknown_label), allow_negative_(allow_negative) {} // Compiles string into an FST. 
template <class FST> bool operator()(const string &str, FST *fst) const { std::vector<Label> labels; if (!internal::ConvertStringToLabels(str, token_type_, syms_, unknown_label_, allow_negative_, &labels)) { return false; } Compile(labels, fst); return true; } template <class FST> bool operator()(const string &str, FST *fst, Weight weight) const { std::vector<Label> labels; if (!internal::ConvertStringToLabels(str, token_type_, syms_, unknown_label_, allow_negative_, &labels)) { return false; } Compile(labels, fst, std::move(weight)); return true; } private: void Compile(const std::vector<Label> &labels, MutableFst<Arc> *fst, Weight weight = Weight::One()) const { fst->DeleteStates(); while (fst->NumStates() <= labels.size()) fst->AddState(); for (StateId i = 0; i < labels.size(); ++i) { fst->AddArc(i, Arc(labels[i], labels[i], Weight::One(), i + 1)); } fst->SetStart(0); fst->SetFinal(labels.size(), std::move(weight)); } template <class Unsigned> void Compile(const std::vector<Label> &labels, CompactStringFst<Arc, Unsigned> *fst) const { fst->SetCompactElements(labels.begin(), labels.end()); } template <class Unsigned> void Compile(const std::vector<Label> &labels, CompactWeightedStringFst<Arc, Unsigned> *fst, const Weight &weight = Weight::One()) const { std::vector<std::pair<Label, Weight>> compacts; compacts.reserve(labels.size() + 1); for (StateId i = 0; i < static_cast<StateId>(labels.size()) - 1; ++i) { compacts.emplace_back(labels[i], Weight::One()); } compacts.emplace_back(!labels.empty() ? labels.back() : kNoLabel, weight); fst->SetCompactElements(compacts.begin(), compacts.end()); } const StringTokenType token_type_; const SymbolTable *syms_; // Symbol table (used when token type is symbol). const Label unknown_label_; // Label for token missing from symbol table. const bool allow_negative_; // Negative labels allowed? StringCompiler(const StringCompiler &) = delete; StringCompiler &operator=(const StringCompiler &) = delete; }; // Functor for printing a string FST as a string. template <class Arc> class StringPrinter { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; explicit StringPrinter(StringTokenType token_type, const SymbolTable *syms = nullptr) : token_type_(token_type), syms_(syms) {} // Converts the FST into a string. If a SYMBOL-based fst, then use the sep as // the separator between symbols. If sep is nullptr, then use the last char // in the FLAGS_fst_field_separator. This is to maintain backwards // compatibility with the code before the sep argument was added. 
bool operator()(const Fst<Arc> &fst, string *result, const string* sep = nullptr) { if (!FstToLabels(fst)) { VLOG(1) << "StringPrinter::operator(): FST is not a string"; return false; } result->clear(); if (token_type_ == StringTokenType::SYMBOL) { std::stringstream sstrm; for (size_t i = 0; i < labels_.size(); ++i) { if (i) { if (sep == nullptr) { sstrm << *(FLAGS_fst_field_separator.rbegin()); } else { sstrm << *sep; } } if (!PrintLabel(labels_[i], sstrm)) return false; } *result = sstrm.str(); } else if (token_type_ == StringTokenType::BYTE) { return LabelsToByteString(labels_, result); } else if (token_type_ == StringTokenType::UTF8) { return LabelsToUTF8String(labels_, result); } else { VLOG(1) << "StringPrinter::operator(): Unknown token type: " << token_type_; return false; } return true; } private: bool FstToLabels(const Fst<Arc> &fst) { labels_.clear(); auto s = fst.Start(); if (s == kNoStateId) { VLOG(2) << "StringPrinter::FstToLabels: Invalid starting state for " << "string FST"; return false; } while (fst.Final(s) == Weight::Zero()) { ArcIterator<Fst<Arc>> aiter(fst, s); if (aiter.Done()) { VLOG(2) << "StringPrinter::FstToLabels: String FST traversal does " << "not reach final state"; return false; } const auto &arc = aiter.Value(); labels_.push_back(arc.olabel); s = arc.nextstate; if (s == kNoStateId) { VLOG(2) << "StringPrinter::FstToLabels: Transition to invalid state"; return false; } aiter.Next(); if (!aiter.Done()) { VLOG(2) << "StringPrinter::FstToLabels: State with multiple " << "outgoing arcs found"; return false; } } return true; } bool PrintLabel(Label label, std::ostream &ostrm) { if (syms_) { const auto symbol = syms_->Find(label); if (symbol == "") { VLOG(2) << "StringPrinter::PrintLabel: Integer " << label << " is not " << "mapped to any textual symbol, symbol table = " << syms_->Name(); return false; } ostrm << symbol; } else { ostrm << label; } return true; } const StringTokenType token_type_; const SymbolTable *syms_; // Symbol table (used when token type is symbol). std::vector<Label> labels_; // Input FST labels. StringPrinter(const StringPrinter &) = delete; StringPrinter &operator=(const StringPrinter &) = delete; }; } // namespace fst #endif // FST_STRING_H_
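A minimal hedged sketch of round-tripping a byte string through the two functors above; the string value is illustrative.

#include <string>

#include <fst/string.h>
#include <fst/vector-fst.h>

int main() {
  // Compile "abc" into a linear 3-arc byte FST.
  fst::StringCompiler<fst::StdArc> compile(fst::StringTokenType::BYTE);
  fst::StdVectorFst f;
  if (!compile("abc", &f)) return 1;

  // Print the FST back to a string.
  fst::StringPrinter<fst::StdArc> print(fst::StringTokenType::BYTE);
  std::string s;
  if (!print(f, &s)) return 1;
  return s == "abc" ? 0 : 1;
}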
0
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/web_microphone_websocket/Readme.md
# Web Microphone Websocket This is an example of a ReactJS web application streaming microphone audio from the browser to a NodeJS server and transmitting the STT results back to the browser. #### Download a pre-trained model and scorer from the [Coqui Model Zoo](https://coqui.ai/models) #### Install: ``` yarn install ``` #### Run ReactJS Client: ``` yarn start ``` #### Run NodeJS Server (in a separate terminal window): ``` node server.js ```
0
coqui_public_repos/inference-engine/third_party/kenlm/util
coqui_public_repos/inference-engine/third_party/kenlm/util/double-conversion/diy-fp.h
// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef DOUBLE_CONVERSION_DIY_FP_H_ #define DOUBLE_CONVERSION_DIY_FP_H_ #include "utils.h" namespace kenlm_double_conversion { // This "Do It Yourself Floating Point" class implements a floating-point number // with a uint64 significand and an int exponent. Normalized DiyFp numbers will // have the most significant bit of the significand set. // Multiplication and Subtraction do not normalize their results. // DiyFp are not designed to contain special doubles (NaN and Infinity). class DiyFp { public: static const int kSignificandSize = 64; DiyFp() : f_(0), e_(0) {} DiyFp(uint64_t significand, int exponent) : f_(significand), e_(exponent) {} // this = this - other. // The exponents of both numbers must be the same and the significand of this // must be bigger than the significand of other. // The result will not be normalized. void Subtract(const DiyFp& other) { ASSERT(e_ == other.e_); ASSERT(f_ >= other.f_); f_ -= other.f_; } // Returns a - b. // The exponents of both numbers must be the same and this must be bigger // than other. The result will not be normalized. static DiyFp Minus(const DiyFp& a, const DiyFp& b) { DiyFp result = a; result.Subtract(b); return result; } // this = this * other. void Multiply(const DiyFp& other); // returns a * b; static DiyFp Times(const DiyFp& a, const DiyFp& b) { DiyFp result = a; result.Multiply(b); return result; } void Normalize() { ASSERT(f_ != 0); uint64_t significand = f_; int exponent = e_; // This method is mainly called for normalizing boundaries. In general // boundaries need to be shifted by 10 bits. We thus optimize for this case. 
const uint64_t k10MSBits = UINT64_2PART_C(0xFFC00000, 00000000); while ((significand & k10MSBits) == 0) { significand <<= 10; exponent -= 10; } while ((significand & kUint64MSB) == 0) { significand <<= 1; exponent--; } f_ = significand; e_ = exponent; } static DiyFp Normalize(const DiyFp& a) { DiyFp result = a; result.Normalize(); return result; } uint64_t f() const { return f_; } int e() const { return e_; } void set_f(uint64_t new_value) { f_ = new_value; } void set_e(int new_value) { e_ = new_value; } private: static const uint64_t kUint64MSB = UINT64_2PART_C(0x80000000, 00000000); uint64_t f_; int e_; }; } // namespace kenlm_double_conversion #endif // DOUBLE_CONVERSION_DIY_FP_H_
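A small hedged sketch of DiyFp arithmetic; it assumes linking against the accompanying diy-fp implementation for Multiply.

#include "diy-fp.h"

int main() {
  using kenlm_double_conversion::DiyFp;
  // 1.0 represented as 2^63 * 2^-63 (normalized: MSB of the significand set).
  DiyFp one(0x8000000000000000ULL, -63);
  // Times keeps the upper 64 bits of the 128-bit significand product and adds
  // the exponents plus 64; the result (2^62 * 2^-62 == 1.0) is unnormalized.
  DiyFp product = DiyFp::Times(one, one);
  DiyFp normalized = DiyFp::Normalize(product);
  return (normalized.f() == 0x8000000000000000ULL && normalized.e() == -63)
             ? 0
             : 1;
}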
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-python_35_8k-linux-amd64-prod_pbmodel-opt.yml
build: template_file: test-linux-opt-base.tyml dependencies: - "linux-amd64-cpu-opt" args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python-tests-prod.sh 3.5.8:m 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 CPU Python v3.5 prod tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on Python v3.5 on prod model, CPU only, optimized version (8kHz)"
0
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/wasm/README.md
# STT WebAssembly example This directory contains examples of STT running in a web page and processing audio files: - `index.html`: STT running in a web page, processing a manually provided audio file; - `index_worker.html`: STT running in a [WebWorker](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API) within a web page, processing a manually provided audio file; ## Install Install NPM modules (only used to serve the web page): ``` npm install ``` Download the latest version of the STT library: ``` npm run download ``` (Optional) Download a pre-trained model and scorer from the [Coqui Model Zoo](https://coqui.ai/models) to the root of the project: ``` mkdir models cd models mv $HOME/Downloads/model.tflite . mv $HOME/Downloads/huge-vocab.scorer . cd .. ``` ## Run Serve the demo: ``` npm run start ```
0
coqui_public_repos
coqui_public_repos/coqpit/LICENSE.txt
MIT License Copyright (c) 2021 Coqui Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/pdt/Makefile.in
# Makefile.in generated by automake 1.14.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @HAVE_BIN_TRUE@bin_PROGRAMS = pdtcompose$(EXEEXT) pdtexpand$(EXEEXT) \ @HAVE_BIN_TRUE@ pdtinfo$(EXEEXT) pdtreplace$(EXEEXT) \ @HAVE_BIN_TRUE@ pdtreverse$(EXEEXT) pdtshortestpath$(EXEEXT) subdir = src/extensions/pdt DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h \ $(top_builddir)/src/include/fst/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 
's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = @HAVE_SCRIPT_TRUE@libfstpdtscript_la_DEPENDENCIES = \ @HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \ @HAVE_SCRIPT_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__libfstpdtscript_la_SOURCES_DIST = getters.cc pdtscript.cc @HAVE_SCRIPT_TRUE@am_libfstpdtscript_la_OBJECTS = getters.lo \ @HAVE_SCRIPT_TRUE@ pdtscript.lo libfstpdtscript_la_OBJECTS = $(am_libfstpdtscript_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libfstpdtscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AM_CXXFLAGS) $(CXXFLAGS) $(libfstpdtscript_la_LDFLAGS) \ $(LDFLAGS) -o $@ @HAVE_SCRIPT_TRUE@am_libfstpdtscript_la_rpath = -rpath $(libdir) PROGRAMS = $(bin_PROGRAMS) am__pdtcompose_SOURCES_DIST = pdtcompose.cc @HAVE_BIN_TRUE@am_pdtcompose_OBJECTS = pdtcompose.$(OBJEXT) pdtcompose_OBJECTS = $(am_pdtcompose_OBJECTS) pdtcompose_LDADD = $(LDADD) @HAVE_BIN_TRUE@pdtcompose_DEPENDENCIES = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__pdtexpand_SOURCES_DIST = pdtexpand.cc @HAVE_BIN_TRUE@am_pdtexpand_OBJECTS = pdtexpand.$(OBJEXT) pdtexpand_OBJECTS = $(am_pdtexpand_OBJECTS) pdtexpand_LDADD = $(LDADD) @HAVE_BIN_TRUE@pdtexpand_DEPENDENCIES = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__pdtinfo_SOURCES_DIST = pdtinfo.cc @HAVE_BIN_TRUE@am_pdtinfo_OBJECTS = pdtinfo.$(OBJEXT) pdtinfo_OBJECTS = $(am_pdtinfo_OBJECTS) pdtinfo_LDADD = $(LDADD) @HAVE_BIN_TRUE@pdtinfo_DEPENDENCIES = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__pdtreplace_SOURCES_DIST = pdtreplace.cc @HAVE_BIN_TRUE@am_pdtreplace_OBJECTS = pdtreplace.$(OBJEXT) pdtreplace_OBJECTS = $(am_pdtreplace_OBJECTS) pdtreplace_LDADD = $(LDADD) @HAVE_BIN_TRUE@pdtreplace_DEPENDENCIES = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__pdtreverse_SOURCES_DIST = pdtreverse.cc @HAVE_BIN_TRUE@am_pdtreverse_OBJECTS = pdtreverse.$(OBJEXT) pdtreverse_OBJECTS = $(am_pdtreverse_OBJECTS) pdtreverse_LDADD = $(LDADD) @HAVE_BIN_TRUE@pdtreverse_DEPENDENCIES = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__pdtshortestpath_SOURCES_DIST = pdtshortestpath.cc @HAVE_BIN_TRUE@am_pdtshortestpath_OBJECTS = pdtshortestpath.$(OBJEXT) pdtshortestpath_OBJECTS 
= $(am_pdtshortestpath_OBJECTS) pdtshortestpath_LDADD = $(LDADD) @HAVE_BIN_TRUE@pdtshortestpath_DEPENDENCIES = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libfstpdtscript_la_SOURCES) $(pdtcompose_SOURCES) \ $(pdtexpand_SOURCES) $(pdtinfo_SOURCES) $(pdtreplace_SOURCES) \ $(pdtreverse_SOURCES) $(pdtshortestpath_SOURCES) DIST_SOURCES = $(am__libfstpdtscript_la_SOURCES_DIST) \ $(am__pdtcompose_SOURCES_DIST) $(am__pdtexpand_SOURCES_DIST) \ $(am__pdtinfo_SOURCES_DIST) $(am__pdtreplace_SOURCES_DIST) \ $(am__pdtreverse_SOURCES_DIST) \ $(am__pdtshortestpath_SOURCES_DIST) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DL_LIBS = @DL_LIBS@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PYTHON = @PYTHON@ PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@ PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_SITE_PKG = @PYTHON_SITE_PKG@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libfstdir = @libfstdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AM_CPPFLAGS = 
-I$(srcdir)/../../include $(ICU_CPPFLAGS) @HAVE_BIN_TRUE@LDADD = libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS) @HAVE_BIN_TRUE@pdtcompose_SOURCES = pdtcompose.cc @HAVE_BIN_TRUE@pdtexpand_SOURCES = pdtexpand.cc @HAVE_BIN_TRUE@pdtinfo_SOURCES = pdtinfo.cc @HAVE_BIN_TRUE@pdtreplace_SOURCES = pdtreplace.cc @HAVE_BIN_TRUE@pdtreverse_SOURCES = pdtreverse.cc @HAVE_BIN_TRUE@pdtshortestpath_SOURCES = pdtshortestpath.cc @HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstpdtscript.la @HAVE_SCRIPT_TRUE@libfstpdtscript_la_SOURCES = getters.cc pdtscript.cc @HAVE_SCRIPT_TRUE@libfstpdtscript_la_LDFLAGS = -version-info 10:0:0 @HAVE_SCRIPT_TRUE@libfstpdtscript_la_LIBADD = ../../script/libfstscript.la \ @HAVE_SCRIPT_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cc .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/pdt/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/extensions/pdt/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libfstpdtscript.la: $(libfstpdtscript_la_OBJECTS) $(libfstpdtscript_la_DEPENDENCIES) $(EXTRA_libfstpdtscript_la_DEPENDENCIES) $(AM_V_CXXLD)$(libfstpdtscript_la_LINK) $(am_libfstpdtscript_la_rpath) $(libfstpdtscript_la_OBJECTS) $(libfstpdtscript_la_LIBADD) $(LIBS) 
install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list pdtcompose$(EXEEXT): $(pdtcompose_OBJECTS) $(pdtcompose_DEPENDENCIES) $(EXTRA_pdtcompose_DEPENDENCIES) @rm -f pdtcompose$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(pdtcompose_OBJECTS) $(pdtcompose_LDADD) $(LIBS) pdtexpand$(EXEEXT): $(pdtexpand_OBJECTS) $(pdtexpand_DEPENDENCIES) $(EXTRA_pdtexpand_DEPENDENCIES) @rm -f pdtexpand$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(pdtexpand_OBJECTS) $(pdtexpand_LDADD) $(LIBS) pdtinfo$(EXEEXT): $(pdtinfo_OBJECTS) $(pdtinfo_DEPENDENCIES) $(EXTRA_pdtinfo_DEPENDENCIES) @rm -f pdtinfo$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(pdtinfo_OBJECTS) $(pdtinfo_LDADD) $(LIBS) pdtreplace$(EXEEXT): $(pdtreplace_OBJECTS) $(pdtreplace_DEPENDENCIES) $(EXTRA_pdtreplace_DEPENDENCIES) @rm -f pdtreplace$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(pdtreplace_OBJECTS) $(pdtreplace_LDADD) $(LIBS) pdtreverse$(EXEEXT): $(pdtreverse_OBJECTS) $(pdtreverse_DEPENDENCIES) $(EXTRA_pdtreverse_DEPENDENCIES) @rm -f pdtreverse$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(pdtreverse_OBJECTS) $(pdtreverse_LDADD) $(LIBS) pdtshortestpath$(EXEEXT): $(pdtshortestpath_OBJECTS) $(pdtshortestpath_DEPENDENCIES) $(EXTRA_pdtshortestpath_DEPENDENCIES) @rm -f pdtshortestpath$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(pdtshortestpath_OBJECTS) $(pdtshortestpath_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getters.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdtcompose.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdtexpand.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdtinfo.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdtreplace.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/pdtreverse.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdtscript.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdtshortestpath.Po@am__quote@ .cc.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cc.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cc.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if 
test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) install-binPROGRAMS: install-libLTLIBRARIES installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \ clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \ clean-libtool cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-binPROGRAMS install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \ uninstall-libLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT:
0
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/kenlm/COPYING.3
GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. 
"This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/replace-util.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Utility classes for the recursive replacement of FSTs (RTNs).

#ifndef FST_REPLACE_UTIL_H_
#define FST_REPLACE_UTIL_H_

#include <cstdint>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <fst/log.h>

#include <fst/connect.h>
#include <fst/mutable-fst.h>
#include <fst/topsort.h>
#include <fst/vector-fst.h>

namespace fst {

// This specifies what labels to output on the call or return arc. Note that
// REPLACE_LABEL_INPUT and REPLACE_LABEL_OUTPUT will produce transducers when
// applied to acceptors.
enum ReplaceLabelType {
  // Epsilon labels on both input and output.
  REPLACE_LABEL_NEITHER = 1,
  // Non-epsilon labels on input and epsilon on output.
  REPLACE_LABEL_INPUT = 2,
  // Epsilon on input and non-epsilon on output.
  REPLACE_LABEL_OUTPUT = 3,
  // Non-epsilon labels on both input and output.
  REPLACE_LABEL_BOTH = 4
};

// By default, ReplaceUtil will copy the input label of the replace arc.
// The call_label_type and return_label_type options specify how to manage
// the labels of the call arc and the return arc of the replace FST.
struct ReplaceUtilOptions {
  int64_t root;                        // Root rule for expansion.
  ReplaceLabelType call_label_type;    // How to label call arc.
  ReplaceLabelType return_label_type;  // How to label return arc.
  int64_t return_label;                // Label to put on return arc.

  explicit ReplaceUtilOptions(
      int64_t root = kNoLabel,
      ReplaceLabelType call_label_type = REPLACE_LABEL_INPUT,
      ReplaceLabelType return_label_type = REPLACE_LABEL_NEITHER,
      int64_t return_label = 0)
      : root(root),
        call_label_type(call_label_type),
        return_label_type(return_label_type),
        return_label(return_label) {}

  // For backwards compatibility.
  ReplaceUtilOptions(int64_t root, bool epsilon_replace_arc)
      : ReplaceUtilOptions(root, epsilon_replace_arc ? REPLACE_LABEL_NEITHER
                                                     : REPLACE_LABEL_INPUT) {}
};

// Every non-terminal on a path appears as the first label on that path in
// every FST associated with a given SCC of the replace dependency graph. This
// would be true if the SCC were formed from left-linear grammar rules.
constexpr uint8_t kReplaceSCCLeftLinear = 0x01;
// Every non-terminal on a path appears as the final label on that path in
// every FST associated with a given SCC of the replace dependency graph. This
// would be true if the SCC were formed from right-linear grammar rules.
constexpr uint8_t kReplaceSCCRightLinear = 0x02;
// The SCC in the replace dependency graph has more than one state or a
// self-loop.
constexpr uint8_t kReplaceSCCNonTrivial = 0x04;

// Defined in replace.h.
template <class Arc>
void Replace(
    const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &,
    MutableFst<Arc> *, const ReplaceUtilOptions &);

// Utility class for the recursive replacement of FSTs (RTNs). The user
// provides a set of label/FST pairs at construction. These are used by
// methods for testing cyclic dependencies and connectedness and doing RTN
// connection and specific FST replacement by label or for various
// optimization properties. The modified results can be obtained with the
// GetFstPairs() or GetMutableFstPairs() methods.
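// A minimal configuration sketch (illustrative only; the root label 1 is a
// hypothetical non-terminal, not anything defined by this header). It keeps
// the non-terminal label on the input side of each call arc and leaves
// return arcs fully epsilon, per the ReplaceUtilOptions constructor above:
//
//   ReplaceUtilOptions opts(/*root=*/1,
//                           REPLACE_LABEL_INPUT,
//                           REPLACE_LABEL_NEITHER,
//                           /*return_label=*/0);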
template <class Arc> class ReplaceUtil { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using FstPair = std::pair<Label, const Fst<Arc> *>; using MutableFstPair = std::pair<Label, MutableFst<Arc> *>; using NonTerminalHash = std::unordered_map<Label, Label>; // Constructs from mutable FSTs; FST ownership is given to ReplaceUtil. ReplaceUtil(const std::vector<MutableFstPair> &fst_pairs, const ReplaceUtilOptions &opts); // Constructs from FSTs; FST ownership is retained by caller. ReplaceUtil(const std::vector<FstPair> &fst_pairs, const ReplaceUtilOptions &opts); // Constructs from ReplaceFst internals; FST ownership is retained by caller. ReplaceUtil(const std::vector<std::unique_ptr<const Fst<Arc>>> &fst_array, const NonTerminalHash &nonterminal_hash, const ReplaceUtilOptions &opts); ~ReplaceUtil() { for (Label i = 0; i < fst_array_.size(); ++i) delete fst_array_[i]; } // True if the non-terminal dependencies are cyclic. Cyclic dependencies will // result in an unexpandable FST. bool CyclicDependencies() const { GetDependencies(false); return depprops_ & kCyclic; } // Returns the strongly-connected component ID in the dependency graph of the // replace FSTs. StateId SCC(Label label) const { GetDependencies(false); const auto it = nonterminal_hash_.find(label); if (it == nonterminal_hash_.end()) return kNoStateId; return depscc_[it->second]; } // Returns properties for the strongly-connected component in the dependency // graph of the replace FSTs. If the SCC is kReplaceSCCLeftLinear or // kReplaceSCCRightLinear, that SCC can be represented as finite-state despite // any cyclic dependencies, but not by the usual replacement operation (see // fst/extensions/pdt/replace.h). uint8_t SCCProperties(StateId scc_id) { GetSCCProperties(); return depsccprops_[scc_id]; } // Returns true if no useless FSTs, states or transitions are present in the // RTN. bool Connected() const { GetDependencies(false); uint64_t props = kAccessible | kCoAccessible; for (Label i = 0; i < fst_array_.size(); ++i) { if (!fst_array_[i]) continue; if (fst_array_[i]->Properties(props, true) != props || !depaccess_[i]) { return false; } } return true; } // Removes useless FSTs, states and transitions from the RTN. void Connect(); // Replaces FSTs specified by labels, unless there are cyclic dependencies. void ReplaceLabels(const std::vector<Label> &labels); // Replaces FSTs that have at most nstates states, narcs arcs and nnonterms // non-terminals (updating in reverse dependency order), unless there are // cyclic dependencies. void ReplaceBySize(size_t nstates, size_t narcs, size_t nnonterms); // Replaces singleton FSTs, unless there are cyclic dependencies. void ReplaceTrivial() { ReplaceBySize(2, 1, 1); } // Replaces non-terminals that have at most ninstances instances (updating in // dependency order), unless there are cyclic dependencies. void ReplaceByInstances(size_t ninstances); // Replaces non-terminals that have only one instance, unless there are cyclic // dependencies. void ReplaceUnique() { ReplaceByInstances(1); } // Returns label/FST pairs, retaining FST ownership. void GetFstPairs(std::vector<FstPair> *fst_pairs); // Returns label/mutable FST pairs, giving FST ownership over to the caller. void GetMutableFstPairs(std::vector<MutableFstPair> *mutable_fst_pairs); private: // FST statistics. struct ReplaceStats { StateId nstates; // Number of states. StateId nfinal; // Number of final states. size_t narcs; // Number of arcs.
Label nnonterms; // Number of non-terminals in FST. size_t nref; // Number of non-terminal instances referring to this FST. // Number of times that the ith FST references this FST. std::map<Label, size_t> inref; // Number of times that this FST references the ith FST. std::map<Label, size_t> outref; ReplaceStats() : nstates(0), nfinal(0), narcs(0), nnonterms(0), nref(0) {} }; // Checks that mutable FSTs exist, creating them if necessary. void CheckMutableFsts(); // Computes the dependency graph for the RTN, computing dependency statistics // if stats is true. void GetDependencies(bool stats) const; void ClearDependencies() const { depfst_.DeleteStates(); stats_.clear(); depprops_ = 0; depsccprops_.clear(); have_stats_ = false; } // Gets topological order of dependencies, returning false with cyclic input. bool GetTopOrder(const Fst<Arc> &fst, std::vector<Label> *toporder) const; // Updates statistics to reflect the replacement of the jth FST. void UpdateStats(Label j); // Computes the properties for the strongly-connected component in the // dependency graph of the replace FSTs. void GetSCCProperties() const; Label root_label_; // Root non-terminal. Label root_fst_; // Root FST ID. ReplaceLabelType call_label_type_; // See Replace(). ReplaceLabelType return_label_type_; // See Replace(). int64_t return_label_; // See Replace(). std::vector<const Fst<Arc> *> fst_array_; // FST per ID. std::vector<MutableFst<Arc> *> mutable_fst_array_; // Mutable FST per ID. std::vector<Label> nonterminal_array_; // FST ID to non-terminal. NonTerminalHash nonterminal_hash_; // Non-terminal to FST ID. mutable VectorFst<Arc> depfst_; // FST ID dependencies. mutable std::vector<StateId> depscc_; // FST SCC ID. mutable std::vector<bool> depaccess_; // FST ID accessibility. mutable uint64_t depprops_; // Dependency FST props. mutable bool have_stats_; // Have dependency statistics? mutable std::vector<ReplaceStats> stats_; // Per-FST statistics. mutable std::vector<uint8_t> depsccprops_; // SCC properties.
ReplaceUtil(const ReplaceUtil &) = delete; ReplaceUtil &operator=(const ReplaceUtil &) = delete; }; template <class Arc> ReplaceUtil<Arc>::ReplaceUtil(const std::vector<MutableFstPair> &fst_pairs, const ReplaceUtilOptions &opts) : root_label_(opts.root), call_label_type_(opts.call_label_type), return_label_type_(opts.return_label_type), return_label_(opts.return_label), depprops_(0), have_stats_(false) { fst_array_.push_back(nullptr); mutable_fst_array_.push_back(nullptr); nonterminal_array_.push_back(kNoLabel); for (const auto &fst_pair : fst_pairs) { const auto label = fst_pair.first; auto *fst = fst_pair.second; nonterminal_hash_[label] = fst_array_.size(); nonterminal_array_.push_back(label); fst_array_.push_back(fst); mutable_fst_array_.push_back(fst); } root_fst_ = nonterminal_hash_[root_label_]; if (!root_fst_) { FSTERROR() << "ReplaceUtil: No root FST for label: " << root_label_; } } template <class Arc> ReplaceUtil<Arc>::ReplaceUtil(const std::vector<FstPair> &fst_pairs, const ReplaceUtilOptions &opts) : root_label_(opts.root), call_label_type_(opts.call_label_type), return_label_type_(opts.return_label_type), return_label_(opts.return_label), depprops_(0), have_stats_(false) { fst_array_.push_back(nullptr); nonterminal_array_.push_back(kNoLabel); for (const auto &fst_pair : fst_pairs) { const auto label = fst_pair.first; const auto *fst = fst_pair.second; nonterminal_hash_[label] = fst_array_.size(); nonterminal_array_.push_back(label); fst_array_.push_back(fst->Copy()); } root_fst_ = nonterminal_hash_[root_label_]; if (!root_fst_) { FSTERROR() << "ReplaceUtil: No root FST for label: " << root_label_; } } template <class Arc> ReplaceUtil<Arc>::ReplaceUtil( const std::vector<std::unique_ptr<const Fst<Arc>>> &fst_array, const NonTerminalHash &nonterminal_hash, const ReplaceUtilOptions &opts) : root_fst_(opts.root), call_label_type_(opts.call_label_type), return_label_type_(opts.return_label_type), return_label_(opts.return_label), nonterminal_array_(fst_array.size()), nonterminal_hash_(nonterminal_hash), depprops_(0), have_stats_(false) { fst_array_.push_back(nullptr); for (size_t i = 1; i < fst_array.size(); ++i) { fst_array_.push_back(fst_array[i]->Copy()); } for (auto it = nonterminal_hash.begin(); it != nonterminal_hash.end(); ++it) { nonterminal_array_[it->second] = it->first; } root_label_ = nonterminal_array_[root_fst_]; } template <class Arc> void ReplaceUtil<Arc>::GetDependencies(bool stats) const { if (depfst_.NumStates() > 0) { if (stats && !have_stats_) { ClearDependencies(); } else { return; } } have_stats_ = stats; if (have_stats_) stats_.reserve(fst_array_.size()); for (Label i = 0; i < fst_array_.size(); ++i) { depfst_.AddState(); depfst_.SetFinal(i, Weight::One()); if (have_stats_) stats_.push_back(ReplaceStats()); } depfst_.SetStart(root_fst_); // An arc from each state (representing the FST) to the state representing the // FST being replaced for (Label i = 0; i < fst_array_.size(); ++i) { const auto *ifst = fst_array_[i]; if (!ifst) continue; for (StateIterator<Fst<Arc>> siter(*ifst); !siter.Done(); siter.Next()) { const auto s = siter.Value(); if (have_stats_) { ++stats_[i].nstates; if (ifst->Final(s) != Weight::Zero()) ++stats_[i].nfinal; } for (ArcIterator<Fst<Arc>> aiter(*ifst, s); !aiter.Done(); aiter.Next()) { if (have_stats_) ++stats_[i].narcs; const auto &arc = aiter.Value(); auto it = nonterminal_hash_.find(arc.olabel); if (it != nonterminal_hash_.end()) { const auto j = it->second; depfst_.AddArc(i, Arc(arc.olabel, arc.olabel, Weight::One(), j)); if 
(have_stats_) { ++stats_[i].nnonterms; ++stats_[j].nref; ++stats_[j].inref[i]; ++stats_[i].outref[j]; } } } } } // Computes accessibility info. SccVisitor<Arc> scc_visitor(&depscc_, &depaccess_, nullptr, &depprops_); DfsVisit(depfst_, &scc_visitor); } template <class Arc> void ReplaceUtil<Arc>::UpdateStats(Label j) { if (!have_stats_) { FSTERROR() << "ReplaceUtil::UpdateStats: Stats not available"; return; } if (j == root_fst_) return; // Can't replace root. for (auto in = stats_[j].inref.begin(); in != stats_[j].inref.end(); ++in) { const auto i = in->first; const auto ni = in->second; stats_[i].nstates += stats_[j].nstates * ni; stats_[i].narcs += (stats_[j].narcs + 1) * ni; stats_[i].nnonterms += (stats_[j].nnonterms - 1) * ni; stats_[i].outref.erase(j); for (auto out = stats_[j].outref.begin(); out != stats_[j].outref.end(); ++out) { const auto k = out->first; const auto nk = out->second; stats_[i].outref[k] += ni * nk; } } for (auto out = stats_[j].outref.begin(); out != stats_[j].outref.end(); ++out) { const auto k = out->first; const auto nk = out->second; stats_[k].nref -= nk; stats_[k].inref.erase(j); for (auto in = stats_[j].inref.begin(); in != stats_[j].inref.end(); ++in) { const auto i = in->first; const auto ni = in->second; stats_[k].inref[i] += ni * nk; stats_[k].nref += ni * nk; } } } template <class Arc> void ReplaceUtil<Arc>::CheckMutableFsts() { if (mutable_fst_array_.empty()) { for (Label i = 0; i < fst_array_.size(); ++i) { if (!fst_array_[i]) { mutable_fst_array_.push_back(nullptr); } else { mutable_fst_array_.push_back(new VectorFst<Arc>(*fst_array_[i])); delete fst_array_[i]; fst_array_[i] = mutable_fst_array_[i]; } } } } template <class Arc> void ReplaceUtil<Arc>::Connect() { CheckMutableFsts(); static constexpr auto props = kAccessible | kCoAccessible; for (auto *mutable_fst : mutable_fst_array_) { if (!mutable_fst) continue; if (mutable_fst->Properties(props, false) != props) { fst::Connect(mutable_fst); } } GetDependencies(false); for (Label i = 0; i < mutable_fst_array_.size(); ++i) { auto *fst = mutable_fst_array_[i]; if (fst && !depaccess_[i]) { delete fst; fst_array_[i] = nullptr; mutable_fst_array_[i] = nullptr; } } ClearDependencies(); } template <class Arc> bool ReplaceUtil<Arc>::GetTopOrder(const Fst<Arc> &fst, std::vector<Label> *toporder) const { // Finds topological order of dependencies. std::vector<StateId> order; bool acyclic = false; TopOrderVisitor<Arc> top_order_visitor(&order, &acyclic); DfsVisit(fst, &top_order_visitor); if (!acyclic) { LOG(WARNING) << "ReplaceUtil::GetTopOrder: Cyclical label dependencies"; return false; } toporder->resize(order.size()); for (Label i = 0; i < order.size(); ++i) (*toporder)[order[i]] = i; return true; } template <class Arc> void ReplaceUtil<Arc>::ReplaceLabels(const std::vector<Label> &labels) { CheckMutableFsts(); std::unordered_set<Label> label_set; for (const auto label : labels) { // Can't replace root. if (label != root_label_) label_set.insert(label); } // Finds FST dependencies restricted to the labels requested. 
GetDependencies(false); VectorFst<Arc> pfst(depfst_); for (StateId i = 0; i < pfst.NumStates(); ++i) { std::vector<Arc> arcs; for (ArcIterator<VectorFst<Arc>> aiter(pfst, i); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); const auto label = nonterminal_array_[arc.nextstate]; if (label_set.count(label) > 0) arcs.push_back(arc); } pfst.DeleteArcs(i); for (const auto &arc : arcs) pfst.AddArc(i, arc); } std::vector<Label> toporder; if (!GetTopOrder(pfst, &toporder)) { ClearDependencies(); return; } // Visits FSTs in reverse topological order of dependencies and performs // replacements. for (Label o = toporder.size() - 1; o >= 0; --o) { std::vector<FstPair> fst_pairs; auto s = toporder[o]; for (ArcIterator<VectorFst<Arc>> aiter(pfst, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); const auto label = nonterminal_array_[arc.nextstate]; const auto *fst = fst_array_[arc.nextstate]; fst_pairs.push_back(std::make_pair(label, fst)); } if (fst_pairs.empty()) continue; const auto label = nonterminal_array_[s]; const auto *fst = fst_array_[s]; fst_pairs.push_back(std::make_pair(label, fst)); const ReplaceUtilOptions opts(label, call_label_type_, return_label_type_, return_label_); Replace(fst_pairs, mutable_fst_array_[s], opts); } ClearDependencies(); } template <class Arc> void ReplaceUtil<Arc>::ReplaceBySize(size_t nstates, size_t narcs, size_t nnonterms) { std::vector<Label> labels; GetDependencies(true); std::vector<Label> toporder; if (!GetTopOrder(depfst_, &toporder)) { ClearDependencies(); return; } for (Label o = toporder.size() - 1; o >= 0; --o) { const auto j = toporder[o]; if (stats_[j].nstates <= nstates && stats_[j].narcs <= narcs && stats_[j].nnonterms <= nnonterms) { labels.push_back(nonterminal_array_[j]); UpdateStats(j); } } ReplaceLabels(labels); } template <class Arc> void ReplaceUtil<Arc>::ReplaceByInstances(size_t ninstances) { std::vector<Label> labels; GetDependencies(true); std::vector<Label> toporder; if (!GetTopOrder(depfst_, &toporder)) { ClearDependencies(); return; } for (Label o = 0; o < toporder.size(); ++o) { const auto j = toporder[o]; if (stats_[j].nref <= ninstances) { labels.push_back(nonterminal_array_[j]); UpdateStats(j); } } ReplaceLabels(labels); } template <class Arc> void ReplaceUtil<Arc>::GetFstPairs(std::vector<FstPair> *fst_pairs) { CheckMutableFsts(); fst_pairs->clear(); for (Label i = 0; i < fst_array_.size(); ++i) { const auto label = nonterminal_array_[i]; const auto *fst = fst_array_[i]; if (!fst) continue; fst_pairs->push_back(std::make_pair(label, fst)); } } template <class Arc> void ReplaceUtil<Arc>::GetMutableFstPairs( std::vector<MutableFstPair> *mutable_fst_pairs) { CheckMutableFsts(); mutable_fst_pairs->clear(); for (Label i = 0; i < mutable_fst_array_.size(); ++i) { const auto label = nonterminal_array_[i]; const auto *fst = mutable_fst_array_[i]; if (!fst) continue; mutable_fst_pairs->push_back(std::make_pair(label, fst->Copy())); } } template <class Arc> void ReplaceUtil<Arc>::GetSCCProperties() const { if (!depsccprops_.empty()) return; GetDependencies(false); if (depscc_.empty()) return; for (StateId scc = 0; scc < depscc_.size(); ++scc) { depsccprops_.push_back(kReplaceSCCLeftLinear | kReplaceSCCRightLinear); } if (!(depprops_ & kCyclic)) return; // No cyclic dependencies. // Checks for self-loops in the dependency graph. 
for (StateId scc = 0; scc < depscc_.size(); ++scc) { for (ArcIterator<Fst<Arc> > aiter(depfst_, scc); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); if (arc.nextstate == scc) { // SCC has a self loop. depsccprops_[scc] |= kReplaceSCCNonTrivial; } } } std::vector<bool> depscc_visited(depscc_.size(), false); for (Label i = 0; i < fst_array_.size(); ++i) { const auto *fst = fst_array_[i]; if (!fst) continue; const auto depscc = depscc_[i]; if (depscc_visited[depscc]) { // SCC has more than one state. depsccprops_[depscc] |= kReplaceSCCNonTrivial; } depscc_visited[depscc] = true; std::vector<StateId> fstscc; // SCCs of the current FST. uint64_t fstprops; SccVisitor<Arc> scc_visitor(&fstscc, nullptr, nullptr, &fstprops); DfsVisit(*fst, &scc_visitor); for (StateIterator<Fst<Arc>> siter(*fst); !siter.Done(); siter.Next()) { const auto s = siter.Value(); for (ArcIterator<Fst<Arc>> aiter(*fst, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); auto it = nonterminal_hash_.find(arc.olabel); if (it == nonterminal_hash_.end() || depscc_[it->second] != depscc) { continue; // Skips if a terminal or a non-terminal not in SCC. } const bool arc_in_cycle = fstscc[s] == fstscc[arc.nextstate]; // Left linear iff all non-terminals are initial. if (s != fst->Start() || arc_in_cycle) { depsccprops_[depscc] &= ~kReplaceSCCLeftLinear; } // Right linear iff all non-terminals are final. if (fst->Final(arc.nextstate) == Weight::Zero() || arc_in_cycle) { depsccprops_[depscc] &= ~kReplaceSCCRightLinear; } } } } } } // namespace fst #endif // FST_REPLACE_UTIL_H_
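The header above ships no usage example, so here is a minimal hedged sketch of how ReplaceUtil is typically driven: register label/FST pairs, check for cyclic dependencies, then inline trivial rules and trim the RTN. The non-terminal labels (kRoot, kGreet) and the FST contents are hypothetical placeholders chosen outside the terminal alphabet, not anything defined by OpenFst itself.

#include <utility>
#include <vector>

#include <fst/replace-util.h>
#include <fst/vector-fst.h>

int main() {
  using fst::StdArc;
  using fst::VectorFst;
  // Hypothetical non-terminal labels, outside the terminal alphabet.
  const StdArc::Label kRoot = 1000001;
  const StdArc::Label kGreet = 1000002;
  // greet: a singleton rule accepting the single terminal label 1.
  VectorFst<StdArc> greet;
  greet.AddState();
  greet.AddState();
  greet.SetStart(0);
  greet.SetFinal(1, StdArc::Weight::One());
  greet.AddArc(0, StdArc(1, 1, StdArc::Weight::One(), 1));
  // root: calls the kGreet rule; non-terminals are read off output labels.
  VectorFst<StdArc> root;
  root.AddState();
  root.AddState();
  root.SetStart(0);
  root.SetFinal(1, StdArc::Weight::One());
  root.AddArc(0, StdArc(kGreet, kGreet, StdArc::Weight::One(), 1));
  std::vector<std::pair<StdArc::Label, const fst::Fst<StdArc> *>> pairs = {
      {kRoot, &root}, {kGreet, &greet}};
  fst::ReplaceUtil<StdArc> util(pairs, fst::ReplaceUtilOptions(kRoot));
  if (!util.CyclicDependencies()) {
    util.ReplaceTrivial();  // Inlines singleton rules such as greet.
    util.Connect();         // Drops useless FSTs, states and transitions.
  }
  return 0;
}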
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/util/pool.cc
#include "util/pool.hh" #include "util/scoped.hh" #include <cstdlib> #include <algorithm> namespace util { Pool::Pool() { current_ = NULL; current_end_ = NULL; } Pool::~Pool() { FreeAll(); } void Pool::FreeAll() { for (std::vector<void *>::const_iterator i(free_list_.begin()); i != free_list_.end(); ++i) { free(*i); } free_list_.clear(); current_ = NULL; current_end_ = NULL; } void *Pool::More(std::size_t size) { std::size_t amount = std::max(static_cast<size_t>(32) << free_list_.size(), size); uint8_t *ret = static_cast<uint8_t*>(MallocOrThrow(amount)); free_list_.push_back(ret); current_ = ret + size; current_end_ = ret + amount; return ret; } } // namespace util
0
coqui_public_repos/TTS/TTS/vocoder
coqui_public_repos/TTS/TTS/vocoder/datasets/wavegrad_dataset.py
import glob import os import random from multiprocessing import Manager from typing import List, Tuple import numpy as np import torch from torch.utils.data import Dataset class WaveGradDataset(Dataset): """ WaveGrad Dataset searches for all the wav files under the root path and converts them to acoustic features on the fly and returns random segments of (audio, feature) couples. """ def __init__( self, ap, items, seq_len, hop_len, pad_short, conv_pad=2, is_training=True, return_segments=True, use_noise_augment=False, use_cache=False, verbose=False, ): super().__init__() self.ap = ap self.item_list = items self.seq_len = seq_len if return_segments else None self.hop_len = hop_len self.pad_short = pad_short self.conv_pad = conv_pad self.is_training = is_training self.return_segments = return_segments self.use_cache = use_cache self.use_noise_augment = use_noise_augment self.verbose = verbose if return_segments: assert seq_len % hop_len == 0, " [!] seq_len has to be a multiple of hop_len." self.feat_frame_len = seq_len // hop_len + (2 * conv_pad) # cache acoustic features if use_cache: self.create_feature_cache() def create_feature_cache(self): self.manager = Manager() self.cache = self.manager.list() self.cache += [None for _ in range(len(self.item_list))] @staticmethod def find_wav_files(path): return glob.glob(os.path.join(path, "**", "*.wav"), recursive=True) def __len__(self): return len(self.item_list) def __getitem__(self, idx): item = self.load_item(idx) return item def load_test_samples(self, num_samples: int) -> List[Tuple]: """Return test samples. Args: num_samples (int): Number of samples to return. Returns: List[Tuple]: melspectrogram and audio. Shapes: - melspectrogram (Tensor): :math:`[C, T]` - audio (Tensor): :math:`[T_audio]` """ samples = [] return_segments = self.return_segments self.return_segments = False for idx in range(num_samples): mel, audio = self.load_item(idx) samples.append([mel, audio]) self.return_segments = return_segments return samples def load_item(self, idx): """load (audio, feat) couple""" # compute features from wav wavpath = self.item_list[idx] if self.use_cache and self.cache[idx] is not None: audio = self.cache[idx] else: audio = self.ap.load_wav(wavpath) if self.return_segments: # correct audio length wrt segment length if audio.shape[-1] < self.seq_len + self.pad_short: audio = np.pad( audio, (0, self.seq_len + self.pad_short - len(audio)), mode="constant", constant_values=0.0 ) assert ( audio.shape[-1] >= self.seq_len + self.pad_short ), f"{audio.shape[-1]} vs {self.seq_len + self.pad_short}" # correct the audio length wrt hop length p = (audio.shape[-1] // self.hop_len + 1) * self.hop_len - audio.shape[-1] audio = np.pad(audio, (0, p), mode="constant", constant_values=0.0) if self.use_cache: self.cache[idx] = audio if self.return_segments: max_start = len(audio) - self.seq_len start = random.randint(0, max_start) end = start + self.seq_len audio = audio[start:end] if self.use_noise_augment and self.is_training and self.return_segments: audio = audio + (1 / 32768) * torch.randn_like(audio) mel = self.ap.melspectrogram(audio) mel = mel[..., :-1] # ignore the padding audio = torch.from_numpy(audio).float() mel = torch.from_numpy(mel).float().squeeze(0) return (mel, audio) @staticmethod def collate_full_clips(batch): """This is used in tune_wavegrad.py.
It pads sequences to the max length.""" max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1] max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0] mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length]) audios = torch.zeros([len(batch), max_audio_length]) for idx, b in enumerate(batch): mel = b[0] audio = b[1] mels[idx, :, : mel.shape[1]] = mel audios[idx, : audio.shape[0]] = audio return mels, audios
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/pdt/CMakeLists.txt
file(GLOB HEADER_FILES ../../include/fst/extensions/pdt/*.h)
message(STATUS "${HEADER_FILES}")

if(HAVE_SCRIPT)
  add_library(fstpdtscript
    getters.cc
    pdtscript.cc
    ${HEADER_FILES})

  target_link_libraries(fstpdtscript
    fstscript
    fst)

  set_target_properties(fstpdtscript PROPERTIES
    SOVERSION "${SOVERSION}"
    FOLDER pdt
  )

  install(TARGETS fstpdtscript
    LIBRARY DESTINATION lib
    ARCHIVE DESTINATION lib
    RUNTIME DESTINATION lib
  )
endif(HAVE_SCRIPT)

if(HAVE_BIN)
  function (add_executable2 _name)
    add_executable(${ARGV})
    if (TARGET ${_name})
      target_link_libraries(${_name}
        fstpdtscript
        fstscript
        fst
        ${CMAKE_DL_LIBS})
      set_target_properties(${_name} PROPERTIES
        FOLDER pdt/bin
      )
    endif()
    install(TARGETS ${_name} RUNTIME DESTINATION bin)
  endfunction()

  add_executable2(pdtcompose pdtcompose.cc)
  add_executable2(pdtexpand pdtexpand.cc)
  add_executable2(pdtinfo pdtinfo.cc)
  add_executable2(pdtreplace pdtreplace.cc)
  add_executable2(pdtreverse pdtreverse.cc)
  add_executable2(pdtshortestpath pdtshortestpath.cc)
endif(HAVE_BIN)
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_10x_16k-linux-amd64-prod_pbmodel-opt.yml
build:
  template_file: test-linux-opt-base.tyml
  docker_image: "ubuntu:16.04"
  dependencies:
    - "linux-amd64-cpu-opt"
  system_setup:
    >
      ${nodejs.packages_xenial.prep_10} && ${nodejs.packages_xenial.apt_pinning}
      && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt}
  args:
    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests-prod.sh 10.x 16k"
  workerType: "${docker.dsTests}"
  metadata:
    name: "DeepSpeech Linux AMD64 CPU NodeJS 10.x prod tests (16kHz)"
    description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v10.x on prod model, CPU only, optimized version (16kHz)"
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/host-build.sh
#!/bin/bash

set -xe

runtime=$1

source $(dirname "$0")/tc-tests-utils.sh

source $(dirname "$0")/tf_tc-vars.sh

BAZEL_TARGETS="
//native_client:libstt.so
//native_client:generate_scorer_package
"

if [ "${runtime}" = "tflite" ]; then
  BAZEL_BUILD_TFLITE="--define=runtime=tflite"
fi;

BAZEL_BUILD_FLAGS="${BAZEL_BUILD_TFLITE} ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"

SYSTEM_TARGET=host

do_bazel_build

do_deepspeech_binary_build

# TODO: Disabled for now
#if [ "${runtime}" = "tflite" ]; then
#  do_deepspeech_python_build "--tflite"
#else
#  do_deepspeech_python_build
#fi
#
#do_deepspeech_nodejs_build
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstconvert.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/flags.h>

DEFINE_string(fst_type, "vector", "Output FST type");

int fstconvert_main(int argc, char **argv);

int main(int argc, char **argv) { return fstconvert_main(argc, argv); }
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/arc-map.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Class to map over/transform arcs, e.g., change semirings or // implement project/invert. Consider using this when the operation does // not change the number of arcs (except possibly superfinal arcs). #ifndef FST_ARC_MAP_H_ #define FST_ARC_MAP_H_ #include <string> #include <unordered_map> #include <utility> #include <fst/log.h> #include <fst/cache.h> #include <fst/mutable-fst.h> namespace fst { // Determines how final weights are mapped. enum MapFinalAction { // A final weight is mapped into a final weight. An error is raised if this // is not possible. MAP_NO_SUPERFINAL, // A final weight is mapped to an arc to the superfinal state when the result // cannot be represented as a final weight. The superfinal state will be // added only if it is needed. MAP_ALLOW_SUPERFINAL, // A final weight is mapped to an arc to the superfinal state unless the // result can be represented as a final weight of weight Zero(). The // superfinal state is always added (if the input is not the empty FST). MAP_REQUIRE_SUPERFINAL }; // Determines how symbol tables are mapped. enum MapSymbolsAction { // Symbols should be cleared in the result by the map. MAP_CLEAR_SYMBOLS, // Symbols should be copied from the input FST by the map. MAP_COPY_SYMBOLS, // Symbols should not be modified in the result by the map itself. // (They may be set by the mapper). MAP_NOOP_SYMBOLS }; // The ArcMapper interface defines how arcs and final weights are mapped. // This is useful for implementing operations that do not change the number of // arcs (except possibly superfinal arcs). // // template <class A, class B> // class ArcMapper { // public: // using FromArc = A; // using ToArc = B; // // // Maps an arc type FromArc to arc type ToArc. // ToArc operator()(const FromArc &arc); // // // Specifies final action the mapper requires (see above). // // The mapper will be passed final weights as arcs of the form // // Arc(0, 0, weight, kNoStateId). // MapFinalAction FinalAction() const; // // // Specifies input symbol table action the mapper requires (see above). // MapSymbolsAction InputSymbolsAction() const; // // // Specifies output symbol table action the mapper requires (see above). // MapSymbolsAction OutputSymbolsAction() const; // // // This specifies the known properties of an FST mapped by this mapper. It // takes as argument the input FST's known properties. // uint64_t Properties(uint64_t props) const; // }; // // The ArcMap functions and classes below will use the FinalAction() // method of the mapper to determine how to treat final weights, e.g., whether // to add a superfinal state. They will use the Properties() method to set the // result FST properties. // // We include various map versions below. One dimension of variation is // whether the mapping mutates its input, writes to a new result FST, or is an // on-the-fly FST. Another dimension is how we pass the mapper. We allow passing // the mapper by pointer for cases where we need to change the state of the // user's mapper. This is the case with the EncodeMapper, which is reused // during decoding. We also include map versions that pass the mapper by value // or const reference when this suffices. // Maps an arc type A using a mapper function object C, passed // by pointer. This version modifies its FST input.
template <class A, class C> void ArcMap(MutableFst<A> *fst, C *mapper) { using FromArc = A; using ToArc = A; using StateId = typename FromArc::StateId; using Weight = typename FromArc::Weight; if (mapper->InputSymbolsAction() == MAP_CLEAR_SYMBOLS) { fst->SetInputSymbols(nullptr); } if (mapper->OutputSymbolsAction() == MAP_CLEAR_SYMBOLS) { fst->SetOutputSymbols(nullptr); } if (fst->Start() == kNoStateId) return; const auto props = fst->Properties(kFstProperties, false); const auto final_action = mapper->FinalAction(); auto superfinal = kNoStateId; if (final_action == MAP_REQUIRE_SUPERFINAL) { superfinal = fst->AddState(); fst->SetFinal(superfinal, Weight::One()); } for (StateIterator<MutableFst<FromArc>> siter(*fst); !siter.Done(); siter.Next()) { const auto state = siter.Value(); for (MutableArcIterator<MutableFst<FromArc>> aiter(fst, state); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); aiter.SetValue((*mapper)(arc)); } switch (final_action) { case MAP_NO_SUPERFINAL: default: { const FromArc arc(0, 0, fst->Final(state), kNoStateId); const auto final_arc = (*mapper)(arc); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { FSTERROR() << "ArcMap: Non-zero arc labels for superfinal arc"; fst->SetProperties(kError, kError); } fst->SetFinal(state, final_arc.weight); break; } case MAP_ALLOW_SUPERFINAL: { if (state != superfinal) { const FromArc arc(0, 0, fst->Final(state), kNoStateId); auto final_arc = (*mapper)(arc); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { // Add a superfinal state if not already done. if (superfinal == kNoStateId) { superfinal = fst->AddState(); fst->SetFinal(superfinal, Weight::One()); } final_arc.nextstate = superfinal; fst->AddArc(state, final_arc); fst->SetFinal(state, Weight::Zero()); } else { fst->SetFinal(state, final_arc.weight); } } break; } case MAP_REQUIRE_SUPERFINAL: { if (state != superfinal) { const FromArc arc(0, 0, fst->Final(state), kNoStateId); const auto final_arc = (*mapper)(arc); if (final_arc.ilabel != 0 || final_arc.olabel != 0 || final_arc.weight != Weight::Zero()) { fst->AddArc(state, ToArc(final_arc.ilabel, final_arc.olabel, final_arc.weight, superfinal)); } fst->SetFinal(state, Weight::Zero()); } break; } } } fst->SetProperties(mapper->Properties(props), kFstProperties); } // Maps an arc type A using a mapper function object C, passed by value. This // version modifies its FST input. template <class A, class C> void ArcMap(MutableFst<A> *fst, C mapper) { ArcMap(fst, &mapper); } // Maps an arc type A to an arc type B using mapper function object C, // passed by pointer. This version writes the mapped input FST to an // output MutableFst. 
template <class A, class B, class C> void ArcMap(const Fst<A> &ifst, MutableFst<B> *ofst, C *mapper) { using FromArc = A; using StateId = typename FromArc::StateId; using Weight = typename FromArc::Weight; ofst->DeleteStates(); if (mapper->InputSymbolsAction() == MAP_COPY_SYMBOLS) { ofst->SetInputSymbols(ifst.InputSymbols()); } else if (mapper->InputSymbolsAction() == MAP_CLEAR_SYMBOLS) { ofst->SetInputSymbols(nullptr); } if (mapper->OutputSymbolsAction() == MAP_COPY_SYMBOLS) { ofst->SetOutputSymbols(ifst.OutputSymbols()); } else if (mapper->OutputSymbolsAction() == MAP_CLEAR_SYMBOLS) { ofst->SetOutputSymbols(nullptr); } const auto iprops = ifst.Properties(kCopyProperties, false); if (ifst.Start() == kNoStateId) { if (iprops & kError) ofst->SetProperties(kError, kError); return; } const auto final_action = mapper->FinalAction(); if (ifst.Properties(kExpanded, false)) { // Reserves one extra state for a possible superfinal state. ofst->ReserveStates( CountStates(ifst) + (final_action == MAP_NO_SUPERFINAL ? 0 : 1)); } // Adds all states. for (StateIterator<Fst<A>> siter(ifst); !siter.Done(); siter.Next()) { ofst->AddState(); } StateId superfinal = kNoStateId; if (final_action == MAP_REQUIRE_SUPERFINAL) { superfinal = ofst->AddState(); ofst->SetFinal(superfinal, B::Weight::One()); } for (StateIterator<Fst<A>> siter(ifst); !siter.Done(); siter.Next()) { StateId s = siter.Value(); if (s == ifst.Start()) ofst->SetStart(s); ofst->ReserveArcs(s, ifst.NumArcs(s)); for (ArcIterator<Fst<A>> aiter(ifst, s); !aiter.Done(); aiter.Next()) { ofst->AddArc(s, (*mapper)(aiter.Value())); } switch (final_action) { case MAP_NO_SUPERFINAL: default: { B final_arc = (*mapper)(A(0, 0, ifst.Final(s), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { FSTERROR() << "ArcMap: Non-zero arc labels for superfinal arc"; ofst->SetProperties(kError, kError); } ofst->SetFinal(s, final_arc.weight); break; } case MAP_ALLOW_SUPERFINAL: { B final_arc = (*mapper)(A(0, 0, ifst.Final(s), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { // Add a superfinal state if not already done. if (superfinal == kNoStateId) { superfinal = ofst->AddState(); ofst->SetFinal(superfinal, B::Weight::One()); } final_arc.nextstate = superfinal; ofst->AddArc(s, final_arc); ofst->SetFinal(s, B::Weight::Zero()); } else { ofst->SetFinal(s, final_arc.weight); } break; } case MAP_REQUIRE_SUPERFINAL: { B final_arc = (*mapper)(A(0, 0, ifst.Final(s), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0 || final_arc.weight != B::Weight::Zero()) { ofst->AddArc(s, B(final_arc.ilabel, final_arc.olabel, final_arc.weight, superfinal)); } ofst->SetFinal(s, B::Weight::Zero()); break; } } } const auto oprops = ofst->Properties(kFstProperties, false); ofst->SetProperties(mapper->Properties(iprops) | oprops, kFstProperties); } // Maps an arc type A to an arc type B using mapper function // object C, passed by value. This version writes the mapped input // FST to an output MutableFst. template <class A, class B, class C> void ArcMap(const Fst<A> &ifst, MutableFst<B> *ofst, C mapper) { ArcMap(ifst, ofst, &mapper); } struct ArcMapFstOptions : public CacheOptions { // ArcMapFst default caching behaviour is to do no caching. Most mappers are // cheap and therefore we save memory by not doing caching. ArcMapFstOptions() : CacheOptions(true, 0) {} explicit ArcMapFstOptions(const CacheOptions &opts) : CacheOptions(opts) {} }; template <class A, class B, class C> class ArcMapFst; namespace internal { // Implementation of delayed ArcMapFst.
template <class A, class B, class C> class ArcMapFstImpl : public CacheImpl<B> { public: using Arc = B; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using FstImpl<B>::SetType; using FstImpl<B>::SetProperties; using FstImpl<B>::SetInputSymbols; using FstImpl<B>::SetOutputSymbols; using CacheImpl<B>::PushArc; using CacheImpl<B>::HasArcs; using CacheImpl<B>::HasFinal; using CacheImpl<B>::HasStart; using CacheImpl<B>::SetArcs; using CacheImpl<B>::SetFinal; using CacheImpl<B>::SetStart; friend class StateIterator<ArcMapFst<A, B, C>>; ArcMapFstImpl(const Fst<A> &fst, const C &mapper, const ArcMapFstOptions &opts) : CacheImpl<B>(opts), fst_(fst.Copy()), mapper_(new C(mapper)), own_mapper_(true), superfinal_(kNoStateId), nstates_(0) { Init(); } ArcMapFstImpl(const Fst<A> &fst, C *mapper, const ArcMapFstOptions &opts) : CacheImpl<B>(opts), fst_(fst.Copy()), mapper_(mapper), own_mapper_(false), superfinal_(kNoStateId), nstates_(0) { Init(); } ArcMapFstImpl(const ArcMapFstImpl<A, B, C> &impl) : CacheImpl<B>(impl), fst_(impl.fst_->Copy(true)), mapper_(new C(*impl.mapper_)), own_mapper_(true), superfinal_(kNoStateId), nstates_(0) { Init(); } ~ArcMapFstImpl() override { if (own_mapper_) delete mapper_; } StateId Start() { if (!HasStart()) SetStart(FindOState(fst_->Start())); return CacheImpl<B>::Start(); } Weight Final(StateId s) { if (!HasFinal(s)) { switch (final_action_) { case MAP_NO_SUPERFINAL: default: { const auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { FSTERROR() << "ArcMapFst: Non-zero arc labels for superfinal arc"; SetProperties(kError, kError); } SetFinal(s, final_arc.weight); break; } case MAP_ALLOW_SUPERFINAL: { if (s == superfinal_) { SetFinal(s, Weight::One()); } else { const auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel == 0 && final_arc.olabel == 0) { SetFinal(s, final_arc.weight); } else { SetFinal(s, Weight::Zero()); } } break; } case MAP_REQUIRE_SUPERFINAL: { SetFinal(s, s == superfinal_ ? Weight::One() : Weight::Zero()); break; } } } return CacheImpl<B>::Final(s); } size_t NumArcs(StateId s) { if (!HasArcs(s)) Expand(s); return CacheImpl<B>::NumArcs(s); } size_t NumInputEpsilons(StateId s) { if (!HasArcs(s)) Expand(s); return CacheImpl<B>::NumInputEpsilons(s); } size_t NumOutputEpsilons(StateId s) { if (!HasArcs(s)) Expand(s); return CacheImpl<B>::NumOutputEpsilons(s); } uint64_t Properties() const override { return Properties(kFstProperties); } // Sets error if found, and returns other FST impl properties. uint64_t Properties(uint64_t mask) const override { if ((mask & kError) && (fst_->Properties(kError, false) || (mapper_->Properties(0) & kError))) { SetProperties(kError, kError); } return FstImpl<Arc>::Properties(mask); } void InitArcIterator(StateId s, ArcIteratorData<B> *data) { if (!HasArcs(s)) Expand(s); CacheImpl<B>::InitArcIterator(s, data); } void Expand(StateId s) { // Add exiting arcs. if (s == superfinal_) { SetArcs(s); return; } for (ArcIterator<Fst<A>> aiter(*fst_, FindIState(s)); !aiter.Done(); aiter.Next()) { auto aarc = aiter.Value(); aarc.nextstate = FindOState(aarc.nextstate); const auto &barc = (*mapper_)(aarc); PushArc(s, barc); } // Check for superfinal arcs. 
if (!HasFinal(s) || Final(s) == Weight::Zero()) { switch (final_action_) { case MAP_NO_SUPERFINAL: default: break; case MAP_ALLOW_SUPERFINAL: { auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { if (superfinal_ == kNoStateId) superfinal_ = nstates_++; final_arc.nextstate = superfinal_; PushArc(s, final_arc); } break; } case MAP_REQUIRE_SUPERFINAL: { const auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0 || final_arc.weight != B::Weight::Zero()) { PushArc(s, B(final_arc.ilabel, final_arc.olabel, final_arc.weight, superfinal_)); } break; } } } SetArcs(s); } private: void Init() { SetType("map"); if (mapper_->InputSymbolsAction() == MAP_COPY_SYMBOLS) { SetInputSymbols(fst_->InputSymbols()); } else if (mapper_->InputSymbolsAction() == MAP_CLEAR_SYMBOLS) { SetInputSymbols(nullptr); } if (mapper_->OutputSymbolsAction() == MAP_COPY_SYMBOLS) { SetOutputSymbols(fst_->OutputSymbols()); } else if (mapper_->OutputSymbolsAction() == MAP_CLEAR_SYMBOLS) { SetOutputSymbols(nullptr); } if (fst_->Start() == kNoStateId) { final_action_ = MAP_NO_SUPERFINAL; SetProperties(kNullProperties); } else { final_action_ = mapper_->FinalAction(); uint64_t props = fst_->Properties(kCopyProperties, false); SetProperties(mapper_->Properties(props)); if (final_action_ == MAP_REQUIRE_SUPERFINAL) superfinal_ = 0; } } // Maps from output state to input state. StateId FindIState(StateId s) { if (superfinal_ == kNoStateId || s < superfinal_) { return s; } else { return s - 1; } } // Maps from input state to output state. StateId FindOState(StateId is) { auto os = is; if (!(superfinal_ == kNoStateId || is < superfinal_)) ++os; if (os >= nstates_) nstates_ = os + 1; return os; } std::unique_ptr<const Fst<A>> fst_; C *mapper_; const bool own_mapper_; MapFinalAction final_action_; StateId superfinal_; StateId nstates_; }; } // namespace internal // Maps an arc type A to an arc type B using Mapper function object // C. This version is a delayed FST. template <class A, class B, class C> class ArcMapFst : public ImplToFst<internal::ArcMapFstImpl<A, B, C>> { public: using Arc = B; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using Store = DefaultCacheStore<B>; using State = typename Store::State; using Impl = internal::ArcMapFstImpl<A, B, C>; friend class ArcIterator<ArcMapFst<A, B, C>>; friend class StateIterator<ArcMapFst<A, B, C>>; ArcMapFst(const Fst<A> &fst, const C &mapper, const ArcMapFstOptions &opts) : ImplToFst<Impl>(std::make_shared<Impl>(fst, mapper, opts)) {} ArcMapFst(const Fst<A> &fst, C *mapper, const ArcMapFstOptions &opts) : ImplToFst<Impl>(std::make_shared<Impl>(fst, mapper, opts)) {} ArcMapFst(const Fst<A> &fst, const C &mapper) : ImplToFst<Impl>( std::make_shared<Impl>(fst, mapper, ArcMapFstOptions())) {} ArcMapFst(const Fst<A> &fst, C *mapper) : ImplToFst<Impl>( std::make_shared<Impl>(fst, mapper, ArcMapFstOptions())) {} // See Fst<>::Copy() for doc. ArcMapFst(const ArcMapFst<A, B, C> &fst, bool safe = false) : ImplToFst<Impl>(fst, safe) {} // Get a copy of this ArcMapFst. See Fst<>::Copy() for further doc. 
ArcMapFst<A, B, C> *Copy(bool safe = false) const override { return new ArcMapFst<A, B, C>(*this, safe); } inline void InitStateIterator(StateIteratorData<B> *data) const override; void InitArcIterator(StateId s, ArcIteratorData<B> *data) const override { GetMutableImpl()->InitArcIterator(s, data); } protected: using ImplToFst<Impl>::GetImpl; using ImplToFst<Impl>::GetMutableImpl; private: ArcMapFst &operator=(const ArcMapFst &) = delete; }; // Specialization for ArcMapFst. // // This may be derived from. template <class A, class B, class C> class StateIterator<ArcMapFst<A, B, C>> : public StateIteratorBase<B> { public: using StateId = typename B::StateId; explicit StateIterator(const ArcMapFst<A, B, C> &fst) : impl_(fst.GetImpl()), siter_(*impl_->fst_), s_(0), superfinal_(impl_->final_action_ == MAP_REQUIRE_SUPERFINAL) { CheckSuperfinal(); } bool Done() const final { return siter_.Done() && !superfinal_; } StateId Value() const final { return s_; } void Next() final { ++s_; if (!siter_.Done()) { siter_.Next(); CheckSuperfinal(); } else if (superfinal_) { superfinal_ = false; } } void Reset() final { s_ = 0; siter_.Reset(); superfinal_ = impl_->final_action_ == MAP_REQUIRE_SUPERFINAL; CheckSuperfinal(); } private: void CheckSuperfinal() { if (impl_->final_action_ != MAP_ALLOW_SUPERFINAL || superfinal_) return; if (!siter_.Done()) { const auto final_arc = (*impl_->mapper_)(A(0, 0, impl_->fst_->Final(s_), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) superfinal_ = true; } } const internal::ArcMapFstImpl<A, B, C> *impl_; StateIterator<Fst<A>> siter_; StateId s_; bool superfinal_; // True if there is a superfinal state and not done. }; // Specialization for ArcMapFst. template <class A, class B, class C> class ArcIterator<ArcMapFst<A, B, C>> : public CacheArcIterator<ArcMapFst<A, B, C>> { public: using StateId = typename A::StateId; ArcIterator(const ArcMapFst<A, B, C> &fst, StateId s) : CacheArcIterator<ArcMapFst<A, B, C>>(fst.GetMutableImpl(), s) { if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s); } }; template <class A, class B, class C> inline void ArcMapFst<A, B, C>::InitStateIterator( StateIteratorData<B> *data) const { data->base = new StateIterator<ArcMapFst<A, B, C>>(*this); } // Utility Mappers. // Mapper that returns its input. template <class A> class IdentityArcMapper { public: using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return arc; } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props; } }; // Mapper that converts all input symbols to epsilon. template <class A> class InputEpsilonMapper { public: using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return ToArc(0, arc.olabel, arc.weight, arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return (props & kSetArcProperties) | kIEpsilons; } }; // Mapper that converts all output symbols to epsilon. 
template <class A> class OutputEpsilonMapper { public: using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, 0, arc.weight, arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64_t Properties(uint64_t props) const { return (props & kSetArcProperties) | kOEpsilons; } }; // Mapper that returns its input with final states redirected to a single // super-final state. template <class A> class SuperFinalMapper { public: using FromArc = A; using ToArc = A; using Label = typename FromArc::Label; using Weight = typename FromArc::Weight; // Arg allows setting super-final label. explicit SuperFinalMapper(Label final_label = 0) : final_label_(final_label) {} ToArc operator()(const FromArc &arc) const { // Super-final arc. if (arc.nextstate == kNoStateId && arc.weight != Weight::Zero()) { return ToArc(final_label_, final_label_, arc.weight, kNoStateId); } else { return arc; } } constexpr MapFinalAction FinalAction() const { return MAP_REQUIRE_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { if (final_label_ == 0) { return props & kAddSuperFinalProperties; } else { return props & kAddSuperFinalProperties & kILabelInvariantProperties & kOLabelInvariantProperties; } } private: Label final_label_; }; // Mapper that leaves labels and nextstate unchanged and constructs a new weight // from the underlying value of the arc weight. If no weight converter is // explicitly specified, requires that there is a WeightConvert class // specialization that converts the weights. template <class A, class B, class C = WeightConvert<typename A::Weight, typename B::Weight>> class WeightConvertMapper { public: using FromArc = A; using ToArc = B; using Converter = C; using FromWeight = typename FromArc::Weight; using ToWeight = typename ToArc::Weight; explicit WeightConvertMapper(const Converter &c = Converter()) : convert_weight_(c) {} ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, convert_weight_(arc.weight), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props; } private: Converter convert_weight_; }; // Non-precision-changing weight conversions; consider using the more efficient // Cast method instead. using StdToLogMapper = WeightConvertMapper<StdArc, LogArc>; using LogToStdMapper = WeightConvertMapper<LogArc, StdArc>; // Precision-changing weight conversions. using StdToLog64Mapper = WeightConvertMapper<StdArc, Log64Arc>; using LogToLog64Mapper = WeightConvertMapper<LogArc, Log64Arc>; using Log64ToStdMapper = WeightConvertMapper<Log64Arc, StdArc>; using Log64ToLogMapper = WeightConvertMapper<Log64Arc, LogArc>; // Mapper from A to GallicArc<A>.
template <class A, GallicType G = GALLIC_LEFT> class ToGallicMapper { public: using FromArc = A; using ToArc = GallicArc<A, G>; using SW = StringWeight<typename A::Label, GallicStringType(G)>; using AW = typename FromArc::Weight; using GW = typename ToArc::Weight; ToArc operator()(const FromArc &arc) const { // Super-final arc. if (arc.nextstate == kNoStateId && arc.weight != AW::Zero()) { return ToArc(0, 0, GW(SW::One(), arc.weight), kNoStateId); // Super-non-final arc. } else if (arc.nextstate == kNoStateId) { return ToArc(0, 0, GW::Zero(), kNoStateId); // Epsilon label. } else if (arc.olabel == 0) { return ToArc(arc.ilabel, arc.ilabel, GW(SW::One(), arc.weight), arc.nextstate); // Regular label. } else { return ToArc(arc.ilabel, arc.ilabel, GW(SW(arc.olabel), arc.weight), arc.nextstate); } } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64_t Properties(uint64_t props) const { return ProjectProperties(props, true) & kWeightInvariantProperties; } }; // Mapper from GallicArc<A> to A. template <class A, GallicType G = GALLIC_LEFT> class FromGallicMapper { public: using FromArc = GallicArc<A, G>; using ToArc = A; using Label = typename ToArc::Label; using AW = typename ToArc::Weight; using GW = typename FromArc::Weight; explicit FromGallicMapper(Label superfinal_label = 0) : superfinal_label_(superfinal_label), error_(false) {} ToArc operator()(const FromArc &arc) const { // 'Super-non-final' arc. if (arc.nextstate == kNoStateId && arc.weight == GW::Zero()) { return A(arc.ilabel, 0, AW::Zero(), kNoStateId); } Label l = kNoLabel; AW weight; if (!Extract(arc.weight, &weight, &l) || arc.ilabel != arc.olabel) { FSTERROR() << "FromGallicMapper: Unrepresentable weight: " << arc.weight << " for arc with ilabel = " << arc.ilabel << ", olabel = " << arc.olabel << ", nextstate = " << arc.nextstate; error_ = true; } if (arc.ilabel == 0 && l != 0 && arc.nextstate == kNoStateId) { return ToArc(superfinal_label_, l, weight, arc.nextstate); } else { return ToArc(arc.ilabel, l, weight, arc.nextstate); } } constexpr MapFinalAction FinalAction() const { return MAP_ALLOW_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64_t Properties(uint64_t inprops) const { uint64_t outprops = inprops & kOLabelInvariantProperties & kWeightInvariantProperties & kAddSuperFinalProperties; if (error_) outprops |= kError; return outprops; } private: template <GallicType GT> static bool Extract(const GallicWeight<Label, AW, GT> &gallic_weight, typename A::Weight *weight, typename A::Label *label) { using GWT = StringWeight<Label, GallicStringType(GT)>; const GWT &w1 = gallic_weight.Value1(); const AW &w2 = gallic_weight.Value2(); typename GWT::Iterator iter1(w1); const Label l = w1.Size() == 1 ? 
iter1.Value() : 0; if (l == kStringInfinity || l == kStringBad || w1.Size() > 1) return false; *label = l; *weight = w2; return true; } static bool Extract(const GallicWeight<Label, AW, GALLIC> &gallic_weight, typename A::Weight *weight, typename A::Label *label) { if (gallic_weight.Size() > 1) return false; if (gallic_weight.Size() == 0) { *label = 0; *weight = A::Weight::Zero(); return true; } return Extract<GALLIC_RESTRICT>(gallic_weight.Back(), weight, label); } const Label superfinal_label_; mutable bool error_; }; // Mapper from GallicArc<A> to A. template <class A, GallicType G = GALLIC_LEFT> class GallicToNewSymbolsMapper { public: using FromArc = GallicArc<A, G>; using ToArc = A; using Label = typename ToArc::Label; using StateId = typename ToArc::StateId; using AW = typename ToArc::Weight; using GW = typename FromArc::Weight; using SW = StringWeight<Label, GallicStringType(G)>; explicit GallicToNewSymbolsMapper(MutableFst<ToArc> *fst) : fst_(fst), lmax_(0), osymbols_(fst->OutputSymbols()), isymbols_(nullptr), error_(false) { fst_->DeleteStates(); state_ = fst_->AddState(); fst_->SetStart(state_); fst_->SetFinal(state_, AW::One()); if (osymbols_) { string name = osymbols_->Name() + "_from_gallic"; fst_->SetInputSymbols(new SymbolTable(name)); isymbols_ = fst_->MutableInputSymbols(); const int64_t zero = 0; isymbols_->AddSymbol(osymbols_->Find(zero), 0); } else { fst_->SetInputSymbols(nullptr); } } ToArc operator()(const FromArc &arc) { // Super-non-final arc. if (arc.nextstate == kNoStateId && arc.weight == GW::Zero()) { return ToArc(arc.ilabel, 0, AW::Zero(), kNoStateId); } SW w1 = arc.weight.Value1(); AW w2 = arc.weight.Value2(); Label l; if (w1.Size() == 0) { l = 0; } else { auto insert_result = map_.insert(std::make_pair(w1, kNoLabel)); if (!insert_result.second) { l = insert_result.first->second; } else { l = ++lmax_; insert_result.first->second = l; StringWeightIterator<SW> iter1(w1); StateId n; string s; for (size_t i = 0, p = state_; i < w1.Size(); ++i, iter1.Next(), p = n) { n = i == w1.Size() - 1 ? state_ : fst_->AddState(); fst_->AddArc(p, ToArc(i ? 0 : l, iter1.Value(), AW::One(), n)); if (isymbols_) { if (i) s = s + "_"; s = s + osymbols_->Find(iter1.Value()); } } if (isymbols_) isymbols_->AddSymbol(s, l); } } if (l == kStringInfinity || l == kStringBad || arc.ilabel != arc.olabel) { FSTERROR() << "GallicToNewSymbolMapper: Unrepresentable weight: " << l; error_ = true; } return ToArc(arc.ilabel, l, w2, arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_ALLOW_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64_t Properties(uint64_t inprops) const { uint64_t outprops = inprops & kOLabelInvariantProperties & kWeightInvariantProperties & kAddSuperFinalProperties; if (error_) outprops |= kError; return outprops; } private: class StringKey { public: size_t operator()(const SW &x) const { return x.Hash(); } }; using Map = std::unordered_map<SW, Label, StringKey>; MutableFst<ToArc> *fst_; Map map_; Label lmax_; StateId state_; const SymbolTable *osymbols_; SymbolTable *isymbols_; mutable bool error_; }; // TODO(kbg): Add common base class for those mappers which do nothing except // mutate their weights. // Mapper to add a constant to all weights. 
template <class A> class PlusMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; explicit PlusMapper(Weight weight) : weight_(std::move(weight)) {} ToArc operator()(const FromArc &arc) const { if (arc.weight == Weight::Zero()) return arc; return ToArc(arc.ilabel, arc.olabel, Plus(arc.weight, weight_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props & kWeightInvariantProperties; } private: const Weight weight_; }; // Mapper to (right) multiply a constant to all weights. template <class A> class TimesMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; explicit TimesMapper(Weight weight) : weight_(std::move(weight)) {} ToArc operator()(const FromArc &arc) const { if (arc.weight == Weight::Zero()) return arc; return ToArc(arc.ilabel, arc.olabel, Times(arc.weight, weight_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props & kWeightInvariantProperties; } private: const Weight weight_; }; // Mapper to take all weights to a constant power. The power argument is stored // as a double, so if there is a floating-point power implementation for this // weight type, it will take precedence. Otherwise, the power argument's 53 bits // of integer precision will be implicitly converted to a size_t and the default // power implementation (iterated multiplication) will be used instead. template <class A> class PowerMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; explicit PowerMapper(double power) : power_(power) {} ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, Power(arc.weight, power_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props & kWeightInvariantProperties; } private: const double power_; }; // Mapper to reciprocate all non-Zero() weights. template <class A> class InvertWeightMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; ToArc operator()(const FromArc &arc) const { if (arc.weight == Weight::Zero()) return arc; return ToArc(arc.ilabel, arc.olabel, Divide(Weight::One(), arc.weight), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props & kWeightInvariantProperties; } }; // Mapper to map all non-Zero() weights to One(). 
template <class A, class B = A> class RmWeightMapper { public: using FromArc = A; using ToArc = B; using FromWeight = typename FromArc::Weight; using ToWeight = typename ToArc::Weight; ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, arc.weight != FromWeight::Zero() ? ToWeight::One() : ToWeight::Zero(), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return (props & kWeightInvariantProperties) | kUnweighted; } }; // Mapper to quantize all weights. template <class A, class B = A> class QuantizeMapper { public: using FromArc = A; using ToArc = B; using FromWeight = typename FromArc::Weight; using ToWeight = typename ToArc::Weight; QuantizeMapper() : delta_(kDelta) {} explicit QuantizeMapper(float d) : delta_(d) {} ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, arc.weight.Quantize(delta_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props & kWeightInvariantProperties; } private: const float delta_; }; // Mapper from A to B under the assumption: // // B::Weight = A::Weight::ReverseWeight // B::Label == A::Label // B::StateId == A::StateId // // The weight is reversed, while the label and nextstate are preserved. template <class A, class B> class ReverseWeightMapper { public: using FromArc = A; using ToArc = B; ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, arc.weight.Reverse(), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64_t Properties(uint64_t props) const { return props; } }; } // namespace fst #endif // FST_ARC_MAP_H_
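// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original OpenFst header): every weight
// mapper above implements the same ArcMapper protocol -- operator(),
// FinalAction(), the two symbol actions, and Properties() -- so each can be
// passed straight to ArcMap. A minimal, hedged usage sketch follows; it is
// compiled only when FST_ARC_MAP_USAGE_SKETCH is defined, and that guard-macro
// name is an assumption of this sketch, not an OpenFst convention.
#ifdef FST_ARC_MAP_USAGE_SKETCH
#include <fst/vector-fst.h>

namespace {

void WeightMapperSketch() {
  fst::StdVectorFst f;
  const auto s0 = f.AddState();
  const auto s1 = f.AddState();
  f.SetStart(s0);
  f.SetFinal(s1, fst::TropicalWeight::One());
  f.AddArc(s0, fst::StdArc(1, 1, fst::TropicalWeight(0.5), s1));
  // Tropical Plus is min, so this maps each non-Zero() arc and final weight w
  // to min(w, 1.0).
  fst::ArcMap(&f, fst::PlusMapper<fst::StdArc>(fst::TropicalWeight(1.0)));
  // Strip weights: every non-Zero() weight becomes One().
  fst::ArcMap(&f, fst::RmWeightMapper<fst::StdArc>());
}

}  // namespace
#endif  // FST_ARC_MAP_USAGE_SKETCH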
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-python_35_tflite_16k-win-amd64-prod-opt.yml
build: template_file: test-win-opt-base.tyml dependencies: - "win-amd64-tflite-opt" - "test-training_16k-linux-amd64-py36m-opt" test_model_task: "test-training_16k-linux-amd64-py36m-opt" system_setup: > ${system.sox_win} args: tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python_tflite-tests-prod.sh 3.5.4:m 16k" metadata: name: "DeepSpeech Windows AMD64 TFLite Python v3.5 prod tests (16kHz)" description: "Testing DeepSpeech for Windows/AMD64 on Python v3.5 on prod model, TFLite, optimized version (16kHz)"
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/mpdt/Makefile.in
# Makefile.in generated by automake 1.14.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @HAVE_BIN_TRUE@bin_PROGRAMS = mpdtcompose$(EXEEXT) mpdtexpand$(EXEEXT) \ @HAVE_BIN_TRUE@ mpdtinfo$(EXEEXT) mpdtreverse$(EXEEXT) subdir = src/extensions/mpdt DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h \ $(top_builddir)/src/include/fst/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo 
"$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_DEPENDENCIES = \ @HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \ @HAVE_SCRIPT_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__libfstmpdtscript_la_SOURCES_DIST = mpdtscript.cc @HAVE_SCRIPT_TRUE@am_libfstmpdtscript_la_OBJECTS = mpdtscript.lo libfstmpdtscript_la_OBJECTS = $(am_libfstmpdtscript_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libfstmpdtscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AM_CXXFLAGS) $(CXXFLAGS) $(libfstmpdtscript_la_LDFLAGS) \ $(LDFLAGS) -o $@ @HAVE_SCRIPT_TRUE@am_libfstmpdtscript_la_rpath = -rpath $(libdir) PROGRAMS = $(bin_PROGRAMS) am__mpdtcompose_SOURCES_DIST = mpdtcompose.cc @HAVE_BIN_TRUE@am_mpdtcompose_OBJECTS = mpdtcompose.$(OBJEXT) mpdtcompose_OBJECTS = $(am_mpdtcompose_OBJECTS) mpdtcompose_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtcompose_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__mpdtexpand_SOURCES_DIST = mpdtexpand.cc @HAVE_BIN_TRUE@am_mpdtexpand_OBJECTS = mpdtexpand.$(OBJEXT) mpdtexpand_OBJECTS = $(am_mpdtexpand_OBJECTS) mpdtexpand_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtexpand_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__mpdtinfo_SOURCES_DIST = mpdtinfo.cc @HAVE_BIN_TRUE@am_mpdtinfo_OBJECTS = mpdtinfo.$(OBJEXT) mpdtinfo_OBJECTS = $(am_mpdtinfo_OBJECTS) mpdtinfo_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtinfo_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__mpdtreverse_SOURCES_DIST = mpdtreverse.cc @HAVE_BIN_TRUE@am_mpdtreverse_OBJECTS = mpdtreverse.$(OBJEXT) mpdtreverse_OBJECTS = $(am_mpdtreverse_OBJECTS) mpdtreverse_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtreverse_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = depcomp = $(SHELL) $(top_srcdir)/depcomp 
am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libfstmpdtscript_la_SOURCES) $(mpdtcompose_SOURCES) \ $(mpdtexpand_SOURCES) $(mpdtinfo_SOURCES) \ $(mpdtreverse_SOURCES) DIST_SOURCES = $(am__libfstmpdtscript_la_SOURCES_DIST) \ $(am__mpdtcompose_SOURCES_DIST) $(am__mpdtexpand_SOURCES_DIST) \ $(am__mpdtinfo_SOURCES_DIST) $(am__mpdtreverse_SOURCES_DIST) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DL_LIBS = @DL_LIBS@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PYTHON = @PYTHON@ PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@ PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_SITE_PKG = @PYTHON_SITE_PKG@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libfstdir = @libfstdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AM_CPPFLAGS = 
-I$(srcdir)/../../include $(ICU_CPPFLAGS) @HAVE_BIN_TRUE@LDADD = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS) @HAVE_BIN_TRUE@mpdtcompose_SOURCES = mpdtcompose.cc @HAVE_BIN_TRUE@mpdtexpand_SOURCES = mpdtexpand.cc @HAVE_BIN_TRUE@mpdtinfo_SOURCES = mpdtinfo.cc @HAVE_BIN_TRUE@mpdtreverse_SOURCES = mpdtreverse.cc @HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstmpdtscript.la @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_SOURCES = mpdtscript.cc @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_LDFLAGS = -version-info 10:0:0 @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_LIBADD = ../../script/libfstscript.la \ @HAVE_SCRIPT_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cc .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/mpdt/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/extensions/mpdt/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libfstmpdtscript.la: $(libfstmpdtscript_la_OBJECTS) $(libfstmpdtscript_la_DEPENDENCIES) $(EXTRA_libfstmpdtscript_la_DEPENDENCIES) $(AM_V_CXXLD)$(libfstmpdtscript_la_LINK) $(am_libfstmpdtscript_la_rpath) $(libfstmpdtscript_la_OBJECTS) $(libfstmpdtscript_la_LIBADD) $(LIBS) install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) 
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list mpdtcompose$(EXEEXT): $(mpdtcompose_OBJECTS) $(mpdtcompose_DEPENDENCIES) $(EXTRA_mpdtcompose_DEPENDENCIES) @rm -f mpdtcompose$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtcompose_OBJECTS) $(mpdtcompose_LDADD) $(LIBS) mpdtexpand$(EXEEXT): $(mpdtexpand_OBJECTS) $(mpdtexpand_DEPENDENCIES) $(EXTRA_mpdtexpand_DEPENDENCIES) @rm -f mpdtexpand$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtexpand_OBJECTS) $(mpdtexpand_LDADD) $(LIBS) mpdtinfo$(EXEEXT): $(mpdtinfo_OBJECTS) $(mpdtinfo_DEPENDENCIES) $(EXTRA_mpdtinfo_DEPENDENCIES) @rm -f mpdtinfo$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtinfo_OBJECTS) $(mpdtinfo_LDADD) $(LIBS) mpdtreverse$(EXEEXT): $(mpdtreverse_OBJECTS) $(mpdtreverse_DEPENDENCIES) $(EXTRA_mpdtreverse_DEPENDENCIES) @rm -f mpdtreverse$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtreverse_OBJECTS) $(mpdtreverse_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtcompose.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtexpand.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtinfo.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtreverse.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtscript.Plo@am__quote@ .cc.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c 
-o $@ $< .cc.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cc.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) install-binPROGRAMS: install-libLTLIBRARIES installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \ clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \ clean-libtool cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-binPROGRAMS install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \ uninstall-libLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT:
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/lm/read_arpa.cc
#include "lm/read_arpa.hh" #include "lm/blank.hh" #include "util/file.hh" #include <cmath> #include <cstdlib> #include <iostream> #include <sstream> #include <vector> #include <cctype> #include <cstring> #include <stdint.h> #ifdef WIN32 #include <float.h> #endif namespace lm { // 1 for '\t', '\n', '\r', and ' '. This is stricter than isspace. Apparently ARPA allows vertical tab inside a word. const bool kARPASpaces[256] = {0,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; namespace { bool IsEntirelyWhiteSpace(const StringPiece &line) { for (size_t i = 0; i < static_cast<size_t>(line.size()); ++i) { if (!isspace(line.data()[i])) return false; } return true; } const char kBinaryMagic[] = "mmap lm http://kheafield.com/code"; // strtoull isn't portable enough :-( uint64_t ReadCount(const std::string &from) { std::stringstream stream(from); uint64_t ret; stream >> ret; UTIL_THROW_IF(!stream, FormatLoadException, "Bad count " << from); return ret; } } // namespace void ReadARPACounts(util::FilePiece &in, std::vector<uint64_t> &number) { number.clear(); StringPiece line = in.ReadLine(); // In general, ARPA files can have arbitrary text before "\data\" // But in KenLM, we require such lines to start with "#", so that // we can do stricter error checking while (IsEntirelyWhiteSpace(line) || starts_with(line, "#")) { line = in.ReadLine(); } if (line != "\\data\\") { if ((line.size() >= 2) && (line.data()[0] == 0x1f) && (static_cast<unsigned char>(line.data()[1]) == 0x8b)) { UTIL_THROW(FormatLoadException, "Looks like a gzip file. If this is an ARPA file, pipe " << in.FileName() << " through zcat. If this already in binary format, you need to decompress it because mmap doesn't work on top of gzip."); } if (static_cast<size_t>(line.size()) >= strlen(kBinaryMagic) && StringPiece(line.data(), strlen(kBinaryMagic)) == kBinaryMagic) UTIL_THROW(FormatLoadException, "This looks like a binary file but got sent to the ARPA parser. Did you compress the binary file or pass a binary file where only ARPA files are accepted?"); UTIL_THROW_IF(line.size() >= 4 && StringPiece(line.data(), 4) == "blmt", FormatLoadException, "This looks like an IRSTLM binary file. Did you forget to pass --text yes to compile-lm?"); UTIL_THROW_IF(line == "iARPA", FormatLoadException, "This looks like an IRSTLM iARPA file. You need an ARPA file. Run\n compile-lm --text yes " << in.FileName() << " " << in.FileName() << ".arpa\nfirst."); UTIL_THROW(FormatLoadException, "first non-empty line was \"" << line << "\" not \\data\\."); } while (!IsEntirelyWhiteSpace(line = in.ReadLine())) { if (line.size() < 6 || strncmp(line.data(), "ngram ", 6)) UTIL_THROW(FormatLoadException, "count line \"" << line << "\"doesn't begin with \"ngram \""); // So strtol doesn't go off the end of line. 
std::string remaining(line.data() + 6, line.size() - 6); char *end_ptr; unsigned int length = std::strtol(remaining.c_str(), &end_ptr, 10); if ((end_ptr == remaining.c_str()) || (length - 1 != number.size())) UTIL_THROW(FormatLoadException, "ngram count lengths should be consecutive starting with 1: " << line); if (*end_ptr != '=') UTIL_THROW(FormatLoadException, "Expected = immediately following the first number in the count line " << line); ++end_ptr; number.push_back(ReadCount(end_ptr)); } } void ReadNGramHeader(util::FilePiece &in, unsigned int length) { StringPiece line; while (IsEntirelyWhiteSpace(line = in.ReadLine())) {} std::stringstream expected; expected << '\\' << length << "-grams:"; if (line != expected.str()) UTIL_THROW(FormatLoadException, "Was expecting n-gram header " << expected.str() << " but got " << line << " instead"); } void ConsumeNewline(util::FilePiece &in) { char follow = in.get(); UTIL_THROW_IF('\n' != follow, FormatLoadException, "Expected newline got '" << follow << "'"); } void ReadBackoff(util::FilePiece &in, Prob &/*weights*/) { switch (in.get()) { case '\t': { float got = in.ReadFloat(); if (got != 0.0) UTIL_THROW(FormatLoadException, "Non-zero backoff " << got << " provided for an n-gram that should have no backoff"); } break; case '\r': ConsumeNewline(in); // Intentionally no break. case '\n': break; default: UTIL_THROW(FormatLoadException, "Expected tab or newline for backoff"); } } void ReadBackoff(util::FilePiece &in, float &backoff) { // Always make zero negative. // Negative zero means that no (n+1)-gram has this n-gram as context. // Therefore the hypothesis state can be shorter. Of course, many n-grams // are context for (n+1)-grams. An algorithm in the data structure will go // back and set the backoff to positive zero in these cases. switch (in.get()) { case '\t': backoff = in.ReadFloat(); if (backoff == ngram::kExtensionBackoff) backoff = ngram::kNoExtensionBackoff; { #if defined(WIN32) && !defined(__MINGW32__) int float_class = _fpclass(backoff); UTIL_THROW_IF(float_class == _FPCLASS_SNAN || float_class == _FPCLASS_QNAN || float_class == _FPCLASS_NINF || float_class == _FPCLASS_PINF, FormatLoadException, "Bad backoff " << backoff); #else int float_class = std::fpclassify(backoff); UTIL_THROW_IF(float_class == FP_NAN || float_class == FP_INFINITE, FormatLoadException, "Bad backoff " << backoff); #endif } switch (char got = in.get()) { case '\r': ConsumeNewline(in); case '\n': break; default: UTIL_THROW(FormatLoadException, "Expected newline after backoffs, got " << got); } break; case '\r': ConsumeNewline(in); // Intentionally no break. case '\n': backoff = ngram::kNoExtensionBackoff; break; default: UTIL_THROW(FormatLoadException, "Expected tab or newline for backoff"); } } void ReadEnd(util::FilePiece &in) { StringPiece line; do { line = in.ReadLine(); } while (IsEntirelyWhiteSpace(line)); if (line != "\\end\\") UTIL_THROW(FormatLoadException, "Expected \\end\\ but the ARPA file has " << line); try { while (true) { line = in.ReadLine(); if (!IsEntirelyWhiteSpace(line)) UTIL_THROW(FormatLoadException, "Trailing line " << line); } } catch (const util::EndOfFileException &e) {} } void PositiveProbWarn::Warn(float prob) { switch (action_) { case THROW_UP: UTIL_THROW(FormatLoadException, "Positive log probability " << prob << " in the model. This is a bug in IRSTLM; you can set config.positive_log_probability = SILENT or pass -i to build_binary to substitute 0.0 for the log probability. 
Error"); case COMPLAIN: std::cerr << "There's a positive log probability " << prob << " in the APRA file, probably because of a bug in IRSTLM. This and subsequent entires will be mapped to 0 log probability." << std::endl; action_ = SILENT; break; case SILENT: break; } } } // namespace lm
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_14x_multiarchpkg-raspbian-rpi3-opt.yml
build: template_file: test-raspbian-opt-base.tyml dependencies: - "node-package-cpu" - "test-training_16k-linux-amd64-py36m-opt" test_model_task: "test-training_16k-linux-amd64-py36m-opt" system_setup: > ${nodejs.packages_buster.prep_14} && ${nodejs.packages_buster.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_buster.apt} args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node_tflite-tests.sh 14.x 16k" metadata: name: "DeepSpeech Raspbian RPi3/ARMv7 CPU NodeJS MultiArch Package 14.x tests" description: "Testing DeepSpeech for Raspbian RPi3/ARMv7 on NodeJS MultiArch Package v14.x, CPU only, optimized version"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/script/concat.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/script/fst-class.h>
#include <fst/script/concat.h>
#include <fst/script/script-impl.h>

namespace fst {
namespace script {

// Overload 1: destructively computes *ofst = *ofst . ifst.
void Concat(MutableFstClass *ofst, const FstClass &ifst) {
  if (!internal::ArcTypesMatch(*ofst, ifst, "Concat")) {
    ofst->SetProperties(kError, kError);
    return;
  }
  ConcatArgs1 args(ofst, ifst);
  Apply<Operation<ConcatArgs1>>("Concat", ofst->ArcType(), &args);
}

// Overload 2: destructively computes *ofst = ifst . *ofst.
void Concat(const FstClass &ifst, MutableFstClass *ofst) {
  if (!internal::ArcTypesMatch(ifst, *ofst, "Concat")) {
    ofst->SetProperties(kError, kError);
    return;
  }
  ConcatArgs2 args(ifst, ofst);
  Apply<Operation<ConcatArgs2>>("Concat", ofst->ArcType(), &args);
}

REGISTER_FST_OPERATION(Concat, StdArc, ConcatArgs1);
REGISTER_FST_OPERATION(Concat, LogArc, ConcatArgs1);
REGISTER_FST_OPERATION(Concat, Log64Arc, ConcatArgs1);
REGISTER_FST_OPERATION(Concat, StdArc, ConcatArgs2);
REGISTER_FST_OPERATION(Concat, LogArc, ConcatArgs2);
REGISTER_FST_OPERATION(Concat, Log64Arc, ConcatArgs2);

}  // namespace script
}  // namespace fst
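// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): how the script-level
// overloads are typically driven from generic (arc-type-erased) code. The
// function and guard-macro names are illustrative assumptions.
#ifdef FST_SCRIPT_CONCAT_USAGE_SKETCH
namespace fst {
namespace script {
namespace {

void ConcatSketch(MutableFstClass *accum, const FstClass &next) {
  // *accum := *accum . next; on an arc-type mismatch the overload sets
  // kError on *accum instead of throwing.
  Concat(accum, next);
}

}  // namespace
}  // namespace script
}  // namespace fst
#endif  // FST_SCRIPT_CONCAT_USAGE_SKETCH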
0
coqui_public_repos/TTS/TTS/vc/modules
coqui_public_repos/TTS/TTS/vc/modules/freevc/mel_processing.py
import torch import torch.utils.data from librosa.filters import mel as librosa_mel_fn MAX_WAV_VALUE = 32768.0 def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): """ PARAMS ------ C: compression factor """ return torch.log(torch.clamp(x, min=clip_val) * C) def dynamic_range_decompression_torch(x, C=1): """ PARAMS ------ C: compression factor used to compress """ return torch.exp(x) / C def spectral_normalize_torch(magnitudes): output = dynamic_range_compression_torch(magnitudes) return output def spectral_de_normalize_torch(magnitudes): output = dynamic_range_decompression_torch(magnitudes) return output mel_basis = {} hann_window = {} def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): if torch.min(y) < -1.0: print("min value is ", torch.min(y)) if torch.max(y) > 1.0: print("max value is ", torch.max(y)) global hann_window dtype_device = str(y.dtype) + "_" + str(y.device) wnsize_dtype_device = str(win_size) + "_" + dtype_device if wnsize_dtype_device not in hann_window: hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) y = torch.nn.functional.pad( y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect" ) y = y.squeeze(1) spec = torch.stft( y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=False, ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) return spec def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): global mel_basis dtype_device = str(spec.dtype) + "_" + str(spec.device) fmax_dtype_device = str(fmax) + "_" + dtype_device if fmax_dtype_device not in mel_basis: mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) spec = torch.matmul(mel_basis[fmax_dtype_device], spec) spec = spectral_normalize_torch(spec) return spec def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): if torch.min(y) < -1.0: print("min value is ", torch.min(y)) if torch.max(y) > 1.0: print("max value is ", torch.max(y)) global mel_basis, hann_window dtype_device = str(y.dtype) + "_" + str(y.device) fmax_dtype_device = str(fmax) + "_" + dtype_device wnsize_dtype_device = str(win_size) + "_" + dtype_device if fmax_dtype_device not in mel_basis: mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) if wnsize_dtype_device not in hann_window: hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) y = torch.nn.functional.pad( y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect" ) y = y.squeeze(1) spec = torch.stft( y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=False, ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) spec = torch.matmul(mel_basis[fmax_dtype_device], spec) spec = spectral_normalize_torch(spec) return spec
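# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal smoke test of
# mel_spectrogram_torch. The STFT/mel parameters below are illustrative
# FreeVC-style assumptions, not values mandated by this file.
if __name__ == "__main__":
    wav = torch.zeros(1, 16000)  # one second of silence at 16 kHz, shape (B, T)
    mel = mel_spectrogram_torch(
        wav,
        n_fft=1024,
        num_mels=80,
        sampling_rate=16000,
        hop_size=320,
        win_size=1024,
        fmin=0,
        fmax=None,  # librosa then defaults fmax to sampling_rate / 2
    )
    # (B, num_mels, frames); with the module's reflect padding and
    # center=False, frames == T // hop_size, so this prints [1, 80, 50].
    print(mel.shape)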
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/Makefile.in
# Makefile.in generated by automake 1.15.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2017 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h \ $(top_builddir)/src/include/fst/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive 
dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DL_LIBS = @DL_LIBS@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = 
@PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PYTHON = @PYTHON@ PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@ PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_SITE_PKG = @PYTHON_SITE_PKG@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libfstdir = @libfstdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = include lib script bin test extensions all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. 
# To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT:
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/script/register.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_SCRIPT_REGISTER_H_
#define FST_SCRIPT_REGISTER_H_

#include <istream>
#include <string>

#include <fst/generic-register.h>
#include <fst/script/fst-class.h>
#include <fst/script/weight-class.h>

// Holds methods and classes responsible for maintaining
// the register for FstClass arc types.

namespace fst {
namespace script {

// Registers for reading and converting various kinds of FST classes.

// This class definition is to avoid a nested class definition inside the
// IORegistration struct.
template <class Reader, class Creator, class Converter>
struct FstClassRegEntry {
  Reader reader;
  Creator creator;
  Converter converter;

  FstClassRegEntry(Reader r, Creator cr, Converter co)
      : reader(r), creator(cr), converter(co) {}

  FstClassRegEntry() : reader(nullptr), creator(nullptr), converter(nullptr) {}
};

template <class Reader, class Creator, class Converter>
class FstClassIORegister
    : public GenericRegister<string,
                             FstClassRegEntry<Reader, Creator, Converter>,
                             FstClassIORegister<Reader, Creator, Converter>> {
 public:
  Reader GetReader(const string &arc_type) const {
    return this->GetEntry(arc_type).reader;
  }

  Creator GetCreator(const string &arc_type) const {
    return this->GetEntry(arc_type).creator;
  }

  Converter GetConverter(const string &arc_type) const {
    return this->GetEntry(arc_type).converter;
  }

 protected:
  string ConvertKeyToSoFilename(const string &key) const final {
    string legal_type(key);
    ConvertToLegalCSymbol(&legal_type);
    return legal_type + "-arc.so";
  }
};

// Struct containing everything needed to register a particular type
// of FST class (e.g., a plain FstClass, or a MutableFstClass, etc.).
template <class FstClassType>
struct IORegistration {
  using Reader = FstClassType *(*)(std::istream &stream,
                                   const FstReadOptions &opts);

  using Creator = FstClassImplBase *(*)();

  using Converter = FstClassImplBase *(*)(const FstClass &other);

  using Entry = FstClassRegEntry<Reader, Creator, Converter>;

  // FST class Register.
  using Register = FstClassIORegister<Reader, Creator, Converter>;

  // FST class Register-er.
  using Registerer =
      GenericRegisterer<FstClassIORegister<Reader, Creator, Converter>>;
};

#define REGISTER_FST_CLASS(Class, Arc)                                   \
  static IORegistration<Class>::Registerer Class##_##Arc##_registerer(   \
      Arc::Type(),                                                       \
      IORegistration<Class>::Entry(Class::Read<Arc>, Class::Create<Arc>, \
                                   Class::Convert<Arc>))

#define REGISTER_FST_CLASSES(Arc)           \
  REGISTER_FST_CLASS(FstClass, Arc);        \
  REGISTER_FST_CLASS(MutableFstClass, Arc); \
  REGISTER_FST_CLASS(VectorFstClass, Arc);

}  // namespace script
}  // namespace fst

#endif  // FST_SCRIPT_REGISTER_H_
0
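The header above only defines the registration machinery; wiring it up for a new arc type happens in a small translation unit built into the shared object that `ConvertKeyToSoFilename` resolves. Below is a minimal sketch under stated assumptions: `MyArc`, `my-arc.h`, and the `"myarc"` type name are hypothetical and not part of the library, and a real deployment would normally also register the concrete `Fst` implementations for the arc.

```cpp
// my-arc.cc -- hypothetical registration unit for a custom arc type.
// Compiled into "myarc-arc.so" so the register can dlopen it on demand
// (see ConvertKeyToSoFilename above).
#include <fst/script/fst-class.h>
#include <fst/script/register.h>

#include "my-arc.h"  // assumed to define MyArc with static Type() == "myarc"

namespace fst {
namespace script {

// Registers FstClass, MutableFstClass, and VectorFstClass for MyArc.
REGISTER_FST_CLASSES(MyArc);

}  // namespace script
}  // namespace fst
```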
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/swig-win-amd64.yml
build:
  template_file: generic_tc_caching-linux-opt-base.tyml
  docker_image: "ubuntu:18.04"
  build_or_cache:
    repo: "${system.swig.repo}"
    sha: "${system.swig.sha1}"
    dir: "swig"
  cache:
    artifact_url: "${system.swig_build.win.url}"
    artifact_namespace: "${system.swig_build.win.namespace}"
  system_setup: >
    apt-get -qq -y install autoconf automake bison build-essential mingw-w64 &&
    (apt-get -qq -y install sudo || true)
  scripts:
    setup: "taskcluster/tc-true.sh"
    build: "taskcluster/build.sh x86_64-w64-mingw32"
    package: "taskcluster/package.sh"
  workerType: "${docker.dsBuild}"
  metadata:
    name: "SWIG Windows AMD64"
    description: "Building SWIG for Windows/AMD64"
0
coqui_public_repos/STT
coqui_public_repos/STT/doc/requirements.txt
breathe==4.27.0
semver==2.8.1
sphinx==3.5.2
# Upstream Sphinx-JS doesn't work with Python 3.9+, and PRs are not being merged
# See for example https://github.com/mozilla/sphinx-js/pull/184
git+https://github.com/reuben/sphinx-js.git@e222ef7fbbbc0a119aa32568137ee5f4e9a2f33e
furo==2021.2.28b28
pygments==2.7.4
docutils>=0.12,<=0.17.1
#FIXME: switch to stable after C# changes have been merged: https://github.com/djungelorm/sphinx-csharp/pull/8
git+https://github.com/reuben/sphinx-csharp.git@61c6541ce73bf6734b6980d21f690d77cb3d9ed3
recommonmark==0.7.1
# MarkupSafe>2.0.1 is incompatible with version of Jinja2 we depend on
MarkupSafe==2.0.1
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/config.h.in
// OpenFst config file

/* Define to 1 if you have the ICU library. */
#undef HAVE_ICU

/* Define to 1 if the system has the type
   `std::tr1::hash<long long unsigned>'. */
#define HAVE_STD__TR1__HASH_LONG_LONG_UNSIGNED_ 1

/* Define to 1 if the system has the type `__gnu_cxx::slist<int>'. */
#define HAVE___GNU_CXX__SLIST_INT_ 1
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidxml/rapidxml_utils.hpp
#ifndef CEREAL_RAPIDXML_UTILS_HPP_INCLUDED
#define CEREAL_RAPIDXML_UTILS_HPP_INCLUDED

// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml_utils.hpp This file contains high-level rapidxml utilities that can be useful
//! in certain simple scenarios. They should probably not be used if maximizing performance is the main objective.

#include "rapidxml.hpp"
#include <vector>
#include <string>
#include <fstream>
#include <stdexcept>

namespace cereal {
namespace rapidxml
{

    //! Represents data loaded from a file
    template<class Ch = char>
    class file
    {
    public:

        //! Loads file into the memory. Data will be automatically destroyed by the destructor.
        //! \param filename Filename to load.
        file(const char *filename)
        {
            using namespace std;

            // Open stream
            basic_ifstream<Ch> stream(filename, ios::binary);
            if (!stream)
                throw runtime_error(string("cannot open file ") + filename);
            stream.unsetf(ios::skipws);

            // Determine stream size
            stream.seekg(0, ios::end);
            size_t size = stream.tellg();
            stream.seekg(0);

            // Load data and add terminating 0
            m_data.resize(size + 1);
            stream.read(&m_data.front(), static_cast<streamsize>(size));
            m_data[size] = 0;
        }

        //! Loads file into the memory. Data will be automatically destroyed by the destructor
        //! \param stream Stream to load from
        file(std::basic_istream<Ch> &stream)
        {
            using namespace std;

            // Load data and add terminating 0
            stream.unsetf(ios::skipws);
            m_data.assign(istreambuf_iterator<Ch>(stream), istreambuf_iterator<Ch>());
            if (stream.fail() || stream.bad())
                throw runtime_error("error reading stream");
            m_data.push_back(0);
        }

        //! Gets file data.
        //! \return Pointer to data of file.
        Ch *data()
        {
            return &m_data.front();
        }

        //! Gets file data.
        //! \return Pointer to data of file.
        const Ch *data() const
        {
            return &m_data.front();
        }

        //! Gets file data size.
        //! \return Size of file data, in characters.
        std::size_t size() const
        {
            return m_data.size();
        }

    private:

        std::vector<Ch> m_data;   // File data
    };

    //! Counts children of node. Time complexity is O(n).
    //! \return Number of children of node
    template<class Ch>
    inline std::size_t count_children(xml_node<Ch> *node)
    {
        xml_node<Ch> *child = node->first_node();
        std::size_t count = 0;
        while (child)
        {
            ++count;
            child = child->next_sibling();
        }
        return count;
    }

    //! Counts attributes of node. Time complexity is O(n).
    //! \return Number of attributes of node
    template<class Ch>
    inline std::size_t count_attributes(xml_node<Ch> *node)
    {
        xml_attribute<Ch> *attr = node->first_attribute();
        std::size_t count = 0;
        while (attr)
        {
            ++count;
            attr = attr->next_attribute();
        }
        return count;
    }

} } // namespace cereal::rapidxml

#endif
0
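Since the header above is a collection of standalone helpers, a short usage sketch may help. This is illustrative only: the `config.xml` path and the expectation of a single root node are assumptions, not part of the header.

```cpp
#include <iostream>

#include "rapidxml.hpp"
#include "rapidxml_utils.hpp"

int main() {
  // Load the whole document into memory; file<> throws std::runtime_error
  // if "config.xml" (a placeholder path) cannot be opened.
  cereal::rapidxml::file<> buffer("config.xml");

  // Parse in place; the buffer must outlive the document.
  cereal::rapidxml::xml_document<> doc;
  doc.parse<0>(buffer.data());

  if (auto *root = doc.first_node()) {
    std::cout << root->name() << " has "
              << cereal::rapidxml::count_children(root) << " children and "
              << cereal::rapidxml::count_attributes(root) << " attributes\n";
  }
  return 0;
}
```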
coqui_public_repos/STT/native_client/java/app/src/main/res
coqui_public_repos/STT/native_client/java/app/src/main/res/drawable/ic_launcher_background.xml
<?xml version="1.0" encoding="utf-8"?> <vector xmlns:android="http://schemas.android.com/apk/res/android" android:width="108dp" android:height="108dp" android:viewportWidth="108" android:viewportHeight="108"> <path android:fillColor="#008577" android:pathData="M0,0h108v108h-108z" /> <path android:fillColor="#00000000" android:pathData="M9,0L9,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M19,0L19,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M29,0L29,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M39,0L39,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M49,0L49,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M59,0L59,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M69,0L69,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M79,0L79,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M89,0L89,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M99,0L99,108" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,9L108,9" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,19L108,19" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,29L108,29" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,39L108,39" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,49L108,49" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,59L108,59" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,69L108,69" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,79L108,79" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,89L108,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M0,99L108,99" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M19,29L89,29" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M19,39L89,39" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M19,49L89,49" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M19,59L89,59" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M19,69L89,69" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" 
android:pathData="M19,79L89,79" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M29,19L29,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M39,19L39,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M49,19L49,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M59,19L59,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M69,19L69,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> <path android:fillColor="#00000000" android:pathData="M79,19L79,89" android:strokeWidth="0.8" android:strokeColor="#33FFFFFF" /> </vector>
0
coqui_public_repos/inference-engine/third_party/kenlm/lm/interpolate
coqui_public_repos/inference-engine/third_party/kenlm/lm/interpolate/merge_test/test_bad_order
<unk>secdis
0
coqui_public_repos/STT-examples/net_framework
coqui_public_repos/STT-examples/net_framework/STTWPF/STT.WPF.sln
 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 VisualStudioVersion = 15.0.28307.421 MinimumVisualStudioVersion = 10.0.40219.1 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "STT.WPF", "STT.WPF.csproj", "{54BFD766-4305-4F4C-BA59-AF45505DF3C1}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "STTClient", "..\..\..\ds\native_client\dotnet\STTClient\STTClient.csproj", "{56DE4091-BBBE-47E4-852D-7268B33B971F}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Release|x64 = Release|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {54BFD766-4305-4F4C-BA59-AF45505DF3C1}.Debug|x64.ActiveCfg = Debug|x64 {54BFD766-4305-4F4C-BA59-AF45505DF3C1}.Debug|x64.Build.0 = Debug|x64 {54BFD766-4305-4F4C-BA59-AF45505DF3C1}.Release|x64.ActiveCfg = Release|x64 {54BFD766-4305-4F4C-BA59-AF45505DF3C1}.Release|x64.Build.0 = Release|x64 {56DE4091-BBBE-47E4-852D-7268B33B971F}.Debug|x64.ActiveCfg = Debug|x64 {56DE4091-BBBE-47E4-852D-7268B33B971F}.Debug|x64.Build.0 = Debug|x64 {56DE4091-BBBE-47E4-852D-7268B33B971F}.Release|x64.ActiveCfg = Release|x64 {56DE4091-BBBE-47E4-852D-7268B33B971F}.Release|x64.Build.0 = Release|x64 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {19C58802-CCEC-4FD1-8D17-A6EB766116F7} EndGlobalSection EndGlobal
0
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/xtts/xtts_manager.py
import torch


class SpeakerManager():
    def __init__(self, speaker_file_path=None):
        self.speakers = torch.load(speaker_file_path)

    @property
    def name_to_id(self):
        return self.speakers.keys()

    @property
    def num_speakers(self):
        return len(self.name_to_id)

    @property
    def speaker_names(self):
        return list(self.name_to_id.keys())


class LanguageManager():
    def __init__(self, config):
        self.langs = config["languages"]

    @property
    def name_to_id(self):
        return self.langs

    @property
    def num_languages(self):
        return len(self.name_to_id)

    @property
    def language_names(self):
        return list(self.name_to_id)
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/arcfilter.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Function objects to restrict which arcs are traversed in an FST.

#ifndef FST_ARCFILTER_H_
#define FST_ARCFILTER_H_

#include <fst/fst.h>
#include <fst/util.h>

namespace fst {

// True for all arcs.
template <class Arc>
class AnyArcFilter {
 public:
  bool operator()(const Arc &arc) const { return true; }
};

// True for (input/output) epsilon arcs.
template <class Arc>
class EpsilonArcFilter {
 public:
  bool operator()(const Arc &arc) const {
    return arc.ilabel == 0 && arc.olabel == 0;
  }
};

// True for input epsilon arcs.
template <class Arc>
class InputEpsilonArcFilter {
 public:
  bool operator()(const Arc &arc) const { return arc.ilabel == 0; }
};

// True for output epsilon arcs.
template <class Arc>
class OutputEpsilonArcFilter {
 public:
  bool operator()(const Arc &arc) const { return arc.olabel == 0; }
};

// True if specified label matches (doesn't match) when keep_match is
// true (false).
template <class Arc>
class LabelArcFilter {
 public:
  using Label = typename Arc::Label;

  explicit LabelArcFilter(Label label, bool match_input = true,
                          bool keep_match = true)
      : label_(label), match_input_(match_input), keep_match_(keep_match) {}

  bool operator()(const Arc &arc) const {
    const bool match = (match_input_ ? arc.ilabel : arc.olabel) == label_;
    return keep_match_ ? match : !match;
  }

 private:
  const Label label_;
  const bool match_input_;
  const bool keep_match_;
};

// True if specified labels match (don't match) when keep_match is true
// (false).
template <class Arc>
class MultiLabelArcFilter {
 public:
  using Label = typename Arc::Label;

  explicit MultiLabelArcFilter(bool match_input = true, bool keep_match = true)
      : match_input_(match_input), keep_match_(keep_match) {}

  bool operator()(const Arc &arc) const {
    const Label label = match_input_ ? arc.ilabel : arc.olabel;
    const bool match = labels_.Find(label) != labels_.End();
    return keep_match_ ? match : !match;
  }

  void AddLabel(Label label) { labels_.Insert(label); }

 private:
  CompactSet<Label, kNoLabel> labels_;
  const bool match_input_;
  const bool keep_match_;
};

}  // namespace fst

#endif  // FST_ARCFILTER_H_
0
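Each filter above is just a predicate over arcs, so it composes naturally with the iterator API from `<fst/fst.h>`. Here is a minimal sketch; the helper name is illustrative and not part of the header.

```cpp
#include <cstddef>

#include <fst/fst.h>
#include <fst/arcfilter.h>

// Counts the arcs of an FST accepted by an arbitrary arc filter.
template <class Arc, class Filter>
std::size_t CountFilteredArcs(const fst::Fst<Arc> &fst, const Filter &filter) {
  std::size_t n = 0;
  for (fst::StateIterator<fst::Fst<Arc>> siter(fst); !siter.Done();
       siter.Next()) {
    for (fst::ArcIterator<fst::Fst<Arc>> aiter(fst, siter.Value());
         !aiter.Done(); aiter.Next()) {
      if (filter(aiter.Value())) ++n;
    }
  }
  return n;
}

// e.g. CountFilteredArcs(some_fst, fst::EpsilonArcFilter<fst::StdArc>())
// counts the arcs that are epsilon on both tapes.
```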
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/nim_mic_vad_streaming/README.md
## NOTE:
This directory contains two sub-directories, one for ``WINDOWS`` and one for ``LINUX``. Read the corresponding README for each OS. The only difference between the two is the library used for gathering audio data from the microphone: on WINDOWS ``portaudio`` is used, while on LINUX the `ALSA-lib C` library is used, which itself provides an interface to the ALSA kernel module. The interface to both libraries is provided through NIM code.

## PREREQUISITES:
* ```libstt.so```
  Go to the [releases](https://github.com/coqui-ai/STT/releases/latest) page and download the native client package based on your OS and CPU architecture. Extract ``libstt.so`` and put it into the subdirectory matching the OS of the native client you downloaded.

#### On WINDOWS:
* Download the ```native_client.amd64.win.tar.xz``` package. [The same applies to ``xx.xx.amd64.cuda.win.xx`` if CUDA is installed, or to ``xx.xx.amd64.tflite.win.xx``.]
* Extract and place ```libstt.so``` in the ```win_nim_vad_streaming``` subdirectory.
* Now see the ``README.md`` in the ```win_nim_vad_streaming``` subdirectory.

#### On LINUX:
* Download the ```native_client.amd64.linux.cpu``` package. [The same applies to ``xx.xx.amd64.cuda.linux.xx`` if CUDA is installed, or to ``xx.xx.amd64.tflite.linux.xx``.]
* Extract and place ```libstt.so``` in the ```linux_nim_vad_streaming``` subdirectory.
* Now see the ``README.md`` in the ```linux_nim_vad_streaming``` subdirectory.

_Note: For easier usage, one can put ``libstt.so`` in the system's PATH rather than copying it to one of the subdirectories._

## NOTE:
The NIM code depends only on the shared library (``libstt.so``). Once the native client package has been downloaded and the ``libstt.so`` shared library extracted and copied to one of the subdirectories (or into the system's PATH), the code can be modified to add more functionality in pure NIM, and the modified code will compile on any platform supported by NIM.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/script/invert.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_SCRIPT_INVERT_H_
#define FST_SCRIPT_INVERT_H_

#include <fst/invert.h>
#include <fst/script/fst-class.h>

namespace fst {
namespace script {

template <class Arc>
void Invert(MutableFstClass *fst) {
  Invert(fst->GetMutableFst<Arc>());
}

void Invert(MutableFstClass *fst);

}  // namespace script
}  // namespace fst

#endif  // FST_SCRIPT_INVERT_H_
0
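The scripting layer makes the operation usable without knowing the arc type at compile time. A minimal sketch of a caller follows; the file names are placeholders and error handling is reduced to a bail-out.

```cpp
#include <memory>

#include <fst/script/fst-class.h>
#include <fst/script/invert.h>

int main() {
  // Read an FST of any registered arc type; convert=true allows reading
  // a non-mutable representation into a mutable one.
  std::unique_ptr<fst::script::MutableFstClass> f(
      fst::script::MutableFstClass::Read("in.fst", /*convert=*/true));
  if (!f) return 1;
  fst::script::Invert(f.get());  // swaps input and output labels in place
  f->Write("out.fst");
  return 0;
}
```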
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/tacotron/capacitron_layers.py
import torch
from torch import nn
from torch.distributions.multivariate_normal import MultivariateNormal as MVN
from torch.nn import functional as F


class CapacitronVAE(nn.Module):
    """Effective Use of Variational Embedding Capacity for prosody transfer.

    See https://arxiv.org/abs/1906.03402"""

    def __init__(
        self,
        num_mel,
        capacitron_VAE_embedding_dim,
        encoder_output_dim=256,
        reference_encoder_out_dim=128,
        speaker_embedding_dim=None,
        text_summary_embedding_dim=None,
    ):
        super().__init__()
        # Init distributions
        self.prior_distribution = MVN(
            torch.zeros(capacitron_VAE_embedding_dim), torch.eye(capacitron_VAE_embedding_dim)
        )
        self.approximate_posterior_distribution = None
        # define output ReferenceEncoder dim to the capacitron_VAE_embedding_dim
        self.encoder = ReferenceEncoder(num_mel, out_dim=reference_encoder_out_dim)

        # Init beta, the Lagrange-like term for the KL distribution
        self.beta = torch.nn.Parameter(torch.log(torch.exp(torch.Tensor([1.0])) - 1), requires_grad=True)

        mlp_input_dimension = reference_encoder_out_dim

        if text_summary_embedding_dim is not None:
            self.text_summary_net = TextSummary(text_summary_embedding_dim, encoder_output_dim=encoder_output_dim)
            mlp_input_dimension += text_summary_embedding_dim
        if speaker_embedding_dim is not None:
            # TODO: Test a multispeaker model!
            mlp_input_dimension += speaker_embedding_dim
        self.post_encoder_mlp = PostEncoderMLP(mlp_input_dimension, capacitron_VAE_embedding_dim)

    def forward(self, reference_mel_info=None, text_info=None, speaker_embedding=None):
        # Use reference
        if reference_mel_info is not None:
            reference_mels = reference_mel_info[0]  # [batch_size, num_frames, num_mels]
            mel_lengths = reference_mel_info[1]  # [batch_size]
            enc_out = self.encoder(reference_mels, mel_lengths)

            # concat speaker_embedding and/or text summary embedding
            if text_info is not None:
                text_inputs = text_info[0]  # [batch_size, num_characters, num_embedding]
                input_lengths = text_info[1]
                text_summary_out = self.text_summary_net(text_inputs, input_lengths).to(reference_mels.device)
                enc_out = torch.cat([enc_out, text_summary_out], dim=-1)
            if speaker_embedding is not None:
                speaker_embedding = torch.squeeze(speaker_embedding)
                enc_out = torch.cat([enc_out, speaker_embedding], dim=-1)

            # Feed the output of the ref encoder and information about text/speaker into
            # an MLP to produce the parameters for the approximate posterior distributions
            mu, sigma = self.post_encoder_mlp(enc_out)
            # convert to cpu because prior_distribution was created on cpu
            mu = mu.cpu()
            sigma = sigma.cpu()

            # Sample from the posterior: z ~ q(z|x)
            self.approximate_posterior_distribution = MVN(mu, torch.diag_embed(sigma))
            VAE_embedding = self.approximate_posterior_distribution.rsample()
        # Infer from the model, bypasses encoding
        else:
            # Sample from the prior: z ~ p(z)
            VAE_embedding = self.prior_distribution.sample().unsqueeze(0)

        # reshape to [batch_size, 1, capacitron_VAE_embedding_dim]
        return VAE_embedding.unsqueeze(1), self.approximate_posterior_distribution, self.prior_distribution, self.beta


class ReferenceEncoder(nn.Module):
    """NN module creating a fixed size prosody embedding from a spectrogram.

    inputs: mel spectrograms [batch_size, num_spec_frames, num_mel]
    outputs: [batch_size, embedding_dim]
    """

    def __init__(self, num_mel, out_dim):
        super().__init__()
        self.num_mel = num_mel
        filters = [1] + [32, 32, 64, 64, 128, 128]
        num_layers = len(filters) - 1
        convs = [
            nn.Conv2d(
                in_channels=filters[i], out_channels=filters[i + 1], kernel_size=(3, 3), stride=(2, 2), padding=(2, 2)
            )
            for i in range(num_layers)
        ]
        self.convs = nn.ModuleList(convs)
        self.training = False
        self.bns = nn.ModuleList([nn.BatchNorm2d(num_features=filter_size) for filter_size in filters[1:]])

        post_conv_height = self.calculate_post_conv_height(num_mel, 3, 2, 2, num_layers)
        self.recurrence = nn.LSTM(
            input_size=filters[-1] * post_conv_height, hidden_size=out_dim, batch_first=True, bidirectional=False
        )

    def forward(self, inputs, input_lengths):
        batch_size = inputs.size(0)
        x = inputs.view(batch_size, 1, -1, self.num_mel)  # [batch_size, num_channels==1, num_frames, num_mel]
        valid_lengths = input_lengths.float()  # [batch_size]
        for conv, bn in zip(self.convs, self.bns):
            x = conv(x)
            x = bn(x)
            x = F.relu(x)

            # Create the post conv width mask based on the valid lengths of the output of the convolution.
            # The valid lengths for the output of a convolution on varying length inputs is
            # ceil(input_length/stride) + 1 for stride=2 and padding=2
            # For example (kernel_size=3, stride=2, padding=2):
            # 0 0 x x x x x 0 0 -> Input = 5, 0 is zero padding, x is valid values coming from padding=2 in conv2d
            # _____
            #   x _____
            #       x _____
            #           x ____
            #               x
            # x x x x -> Output valid length = 4
            # Since every example in the batch is zero padded and therefore has separate valid_lengths,
            # we need to mask off all the values AFTER the valid length for each example in the batch.
            # Otherwise, the convolutions create noise and a lot of spurious information
            valid_lengths = (valid_lengths / 2).float()
            valid_lengths = torch.ceil(valid_lengths).to(dtype=torch.int64) + 1  # 2 is stride -- size: [batch_size]

            post_conv_max_width = x.size(2)
            mask = torch.arange(post_conv_max_width).to(inputs.device).expand(
                len(valid_lengths), post_conv_max_width
            ) < valid_lengths.unsqueeze(1)
            mask = mask.expand(1, 1, -1, -1).transpose(2, 0).transpose(-1, 2)  # [batch_size, 1, post_conv_max_width, 1]
            x = x * mask

        x = x.transpose(1, 2)
        # x: 4D tensor [batch_size, post_conv_width,
        #               num_channels==128, post_conv_height]
        post_conv_width = x.size(1)
        x = x.contiguous().view(batch_size, post_conv_width, -1)
        # x: 3D tensor [batch_size, post_conv_width,
        #               num_channels*post_conv_height]

        # Routine for fetching the last valid output of a dynamic LSTM with varying input lengths and padding
        post_conv_input_lengths = valid_lengths
        packed_seqs = nn.utils.rnn.pack_padded_sequence(
            x, post_conv_input_lengths.tolist(), batch_first=True, enforce_sorted=False
        )  # dynamic rnn sequence padding
        self.recurrence.flatten_parameters()
        _, (ht, _) = self.recurrence(packed_seqs)
        last_output = ht[-1]

        return last_output.to(inputs.device)  # [B, 128]

    @staticmethod
    def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):
        """Height of spec after n convolutions with fixed kernel/stride/pad."""
        for _ in range(n_convs):
            height = (height - kernel_size + 2 * pad) // stride + 1
        return height


class TextSummary(nn.Module):
    def __init__(self, embedding_dim, encoder_output_dim):
        super().__init__()
        self.lstm = nn.LSTM(
            encoder_output_dim,  # text embedding dimension from the text encoder
            embedding_dim,  # fixed length output summary the lstm creates from the input
            batch_first=True,
            bidirectional=False,
        )

    def forward(self, inputs, input_lengths):
        # Routine for fetching the last valid output of a dynamic LSTM with varying input lengths and padding
        packed_seqs = nn.utils.rnn.pack_padded_sequence(
            inputs, input_lengths.tolist(), batch_first=True, enforce_sorted=False
        )  # dynamic rnn sequence padding
        self.lstm.flatten_parameters()
        _, (ht, _) = self.lstm(packed_seqs)
        last_output = ht[-1]
        return last_output


class PostEncoderMLP(nn.Module):
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size
        modules = [
            nn.Linear(input_size, hidden_size),  # Hidden Layer
            nn.Tanh(),
            nn.Linear(hidden_size, hidden_size * 2),
        ]  # Output layer twice the size for mean and variance
        self.net = nn.Sequential(*modules)
        self.softplus = nn.Softplus()

    def forward(self, _input):
        mlp_output = self.net(_input)
        # The mean parameter is unconstrained
        mu = mlp_output[:, : self.hidden_size]
        # The standard deviation must be positive. Parameterise with a softplus
        sigma = self.softplus(mlp_output[:, self.hidden_size :])
        return mu, sigma
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-training-unittests_8k-linux-amd64-py37m-opt.yml
build:
  template_file: test-linux-opt-base.tyml
  dependencies:
    - "linux-amd64-ctc-opt"
  system_setup: >
    apt-get -qq update && apt-get -qq -y install ${training.packages_xenial.apt}
  args:
    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-train-unittests.sh 3.7.6:m"
  workerType: "${docker.dsTests}"
  metadata:
    name: "DeepSpeech on Linux AMD64 CPU training unittests using Python 3.7"
    description: "Training unittests DeepSpeech LDC93S1 model for Linux/AMD64 using Python 3.7, for CPU only, and optimized version"
0
coqui_public_repos/STT-examples/net_framework
coqui_public_repos/STT-examples/net_framework/STTWPF/MainWindow.xaml
<Window x:Class="STTWPF.MainWindow" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:d="http://schemas.microsoft.com/expression/blend/2008" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" Title="Deepspeech client" Width="800" Height="600" Loaded="Window_Loaded" WindowStartupLocation="CenterScreen" mc:Ignorable="d"> <Grid> <Grid.RowDefinitions> <RowDefinition Height="222" /> <RowDefinition /> </Grid.RowDefinitions> <TextBox Grid.Row="1" Margin="10,36,10,10" FontSize="16px" Text="{Binding Transcription, Mode=OneWay}" TextWrapping="Wrap" /> <Label Grid.Row="1" Height="26" Margin="10,5,10,0" VerticalAlignment="Top" Content="Results:" /> <Label Height="26" Margin="10,10,10,0" VerticalAlignment="Top" Content="Select an audio file to transcript:" /> <TextBox Height="23" Margin="10,41,10,0" VerticalAlignment="Top" Text="{Binding AudioFilePath, Mode=TwoWay}" TextWrapping="Wrap" /> <Button Width="80" Height="25" Margin="10,69,0,0" HorizontalAlignment="Left" VerticalAlignment="Top" Command="{Binding SelectFileCommand}" Content="Open file" /> <Button Width="82" Height="25" Margin="95,69,0,0" HorizontalAlignment="Left" VerticalAlignment="Top" Command="{Binding EnableExternalScorerCommand}" Content="Enable external scorer" /> <Button Width="75" Height="25" Margin="182,69,0,0" HorizontalAlignment="Left" VerticalAlignment="Top" Command="{Binding InferenceFromFileCommand}" Content="Transcript" /> <Label Height="30" Margin="10,99,10,0" VerticalAlignment="Top" Content="{Binding StatusMessage, Mode=OneWay}" /> <Label Height="26" Margin="10,158,10,0" VerticalAlignment="Top" Content="Select an audio input:" /> <ComboBox Height="23" Margin="20,189,186,0" VerticalAlignment="Top" DisplayMemberPath="FriendlyName" ItemsSource="{Binding AvailableRecordDevices, Mode=TwoWay}" SelectedIndex="0" SelectedItem="{Binding SelectedDevice, Mode=TwoWay}" /> <Button Width="91" Height="23" Margin="0,0,90,10" HorizontalAlignment="Right" VerticalAlignment="Bottom" Command="{Binding StartRecordingCommand}" Content="Record" IsEnabled="{Binding EnableStartRecord, Mode=OneWay}" /> <Button Width="75" Height="23" Margin="0,0,10,10" HorizontalAlignment="Right" VerticalAlignment="Bottom" Command="{Binding StopRecordingCommand}" Content="Stop" IsEnabled="{Binding EnableStopRecord, Mode=OneWay}" /> </Grid> </Window>
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fsttopsort.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

int fsttopsort_main(int argc, char **argv);

int main(int argc, char **argv) { return fsttopsort_main(argc, argv); }
0
coqui_public_repos
coqui_public_repos/STT/.compute
#!/bin/bash

set -xe

apt-get install -y python3-venv libopus0

python3 -m venv /tmp/venv
source /tmp/venv/bin/activate

pip install -U setuptools wheel pip
pip install .
pip uninstall -y tensorflow
pip install tensorflow-gpu==1.14

mkdir -p ../keep/summaries

data="${SHARED_DIR}/data"
fis="${data}/LDC/fisher"
swb="${data}/LDC/LDC97S62/swb"
lbs="${data}/OpenSLR/LibriSpeech/librivox"
cv="${data}/mozilla/CommonVoice/en_1087h_2019-06-12/clips"
npr="${data}/NPR/WAMU/sets/v0.3"

python -u DeepSpeech.py \
  --train_files "${npr}/best-train.sdb","${npr}/good-train.sdb","${cv}/train.sdb","${fis}-train.sdb","${swb}-train.sdb","${lbs}-train-clean-100.sdb","${lbs}-train-clean-360.sdb","${lbs}-train-other-500.sdb" \
  --dev_files "${lbs}-dev-clean.sdb" \
  --test_files "${lbs}-test-clean.sdb" \
  --train_batch_size 24 \
  --dev_batch_size 48 \
  --test_batch_size 48 \
  --train_cudnn \
  --n_hidden 2048 \
  --learning_rate 0.0001 \
  --dropout_rate 0.40 \
  --epochs 150 \
  --noearly_stop \
  --feature_cache "../tmp/feature.cache" \
  --checkpoint_dir "../keep" \
  --summary_dir "../keep/summaries"
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers/tensorrt/tensorrt_provider_factory.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "onnxruntime_c_api.h"

#ifdef __cplusplus
extern "C" {
#endif

ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_Tensorrt,
               _In_ OrtSessionOptions* options, int device_id);

#ifdef __cplusplus
}
#endif
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/test/weight_test.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Regression test for FST weights.

#include <cstdlib>
#include <ctime>

#include <fst/flags.h>
#include <fst/log.h>
#include <fst/expectation-weight.h>
#include <fst/float-weight.h>
#include <fst/lexicographic-weight.h>
#include <fst/power-weight.h>
#include <fst/product-weight.h>
#include <fst/set-weight.h>
#include <fst/signed-log-weight.h>
#include <fst/sparse-power-weight.h>
#include <fst/string-weight.h>
#include <fst/union-weight.h>
#include <fst/test/weight-tester.h>

DEFINE_int32(seed, -1, "random seed");
DEFINE_int32(repeat, 10000, "number of test repetitions");

namespace {

using fst::Adder;
using fst::ExpectationWeight;
using fst::GALLIC;
using fst::GallicWeight;
using fst::LexicographicWeight;
using fst::LogWeight;
using fst::LogWeightTpl;
using fst::MinMaxWeight;
using fst::MinMaxWeightTpl;
using fst::NaturalLess;
using fst::PowerWeight;
using fst::ProductWeight;
using fst::SetWeight;
using fst::SET_INTERSECT_UNION;
using fst::SET_UNION_INTERSECT;
using fst::SET_BOOLEAN;
using fst::SignedLogWeight;
using fst::SignedLogWeightTpl;
using fst::SparsePowerWeight;
using fst::StringWeight;
using fst::STRING_LEFT;
using fst::STRING_RIGHT;
using fst::TropicalWeight;
using fst::TropicalWeightTpl;
using fst::UnionWeight;
using fst::WeightConvert;
using fst::WeightGenerate;
using fst::WeightTester;

template <class T>
void TestTemplatedWeights(int repeat) {
  using TropicalWeightGenerate = WeightGenerate<TropicalWeightTpl<T>>;
  TropicalWeightGenerate tropical_generate;
  WeightTester<TropicalWeightTpl<T>, TropicalWeightGenerate> tropical_tester(
      tropical_generate);
  tropical_tester.Test(repeat);
  using LogWeightGenerate = WeightGenerate<LogWeightTpl<T>>;
  LogWeightGenerate log_generate;
  WeightTester<LogWeightTpl<T>, LogWeightGenerate> log_tester(log_generate);
  log_tester.Test(repeat);
  using MinMaxWeightGenerate = WeightGenerate<MinMaxWeightTpl<T>>;
  MinMaxWeightGenerate minmax_generate(true);
  WeightTester<MinMaxWeightTpl<T>, MinMaxWeightGenerate> minmax_tester(
      minmax_generate);
  minmax_tester.Test(repeat);
  using SignedLogWeightGenerate = WeightGenerate<SignedLogWeightTpl<T>>;
  SignedLogWeightGenerate signedlog_generate;
  WeightTester<SignedLogWeightTpl<T>, SignedLogWeightGenerate>
      signedlog_tester(signedlog_generate);
  signedlog_tester.Test(repeat);
}

template <class Weight>
void TestAdder(int n) {
  Weight sum = Weight::Zero();
  Adder<Weight> adder;
  for (int i = 0; i < n; ++i) {
    sum = Plus(sum, Weight::One());
    adder.Add(Weight::One());
  }
  CHECK(ApproxEqual(sum, adder.Sum()));
}

template <class Weight>
void TestSignedAdder(int n) {
  Weight sum = Weight::Zero();
  Adder<Weight> adder;
  const Weight minus_one = Minus(Weight::Zero(), Weight::One());
  for (int i = 0; i < n; ++i) {
    if (i < n/4 || i > 3*n/4) {
      sum = Plus(sum, Weight::One());
      adder.Add(Weight::One());
    } else {
      sum = Minus(sum, Weight::One());
      adder.Add(minus_one);
    }
  }
  CHECK(ApproxEqual(sum, adder.Sum()));
}

template <typename Weight1, typename Weight2>
void TestWeightConversion(Weight1 w1) {
  // Tests round-trip conversion.
  WeightConvert<Weight2, Weight1> to_w1_;
  WeightConvert<Weight1, Weight2> to_w2_;
  Weight2 w2 = to_w2_(w1);
  Weight1 nw1 = to_w1_(w2);
  CHECK_EQ(w1, nw1);
}

template <typename FromWeight, typename ToWeight>
void TestWeightCopy(FromWeight w) {
  // Test copy constructor.
  const ToWeight to_copied(w);
  const FromWeight roundtrip_copied(to_copied);
  CHECK_EQ(w, roundtrip_copied);

  // Test copy assign.
ToWeight to_copy_assigned; to_copy_assigned = w; CHECK_EQ(to_copied, to_copy_assigned); FromWeight roundtrip_copy_assigned; roundtrip_copy_assigned = to_copy_assigned; CHECK_EQ(w, roundtrip_copy_assigned); } template <typename FromWeight, typename ToWeight> void TestWeightMove(FromWeight w) { // Assume FromWeight -> FromWeight copy works. const FromWeight orig(w); ToWeight to_moved(std::move(w)); const FromWeight roundtrip_moved(std::move(to_moved)); CHECK_EQ(orig, roundtrip_moved); // Test move assign. w = orig; ToWeight to_move_assigned; to_move_assigned = std::move(w); FromWeight roundtrip_move_assigned; roundtrip_move_assigned = std::move(to_move_assigned); CHECK_EQ(orig, roundtrip_move_assigned); } template <class Weight> void TestImplicitConversion() { // Only test a few of the operations; assumes they are implemented with the // same pattern. CHECK(Weight(2.0f) == 2.0f); CHECK(Weight(2.0) == 2.0); CHECK(2.0f == Weight(2.0f)); CHECK(2.0 == Weight(2.0)); CHECK_EQ(Weight::Zero(), Times(Weight::Zero(), 3.0f)); CHECK_EQ(Weight::Zero(), Times(Weight::Zero(), 3.0)); CHECK_EQ(Weight::Zero(), Times(3.0, Weight::Zero())); CHECK_EQ(Weight(3.0), Plus(Weight::Zero(), 3.0f)); CHECK_EQ(Weight(3.0), Plus(Weight::Zero(), 3.0)); CHECK_EQ(Weight(3.0), Plus(3.0, Weight::Zero())); } void TestPowerWeightGetSetValue() { PowerWeight<LogWeight, 3> w; // LogWeight has unspecified initial value, so don't check it. w.SetValue(0, LogWeight(2)); w.SetValue(1, LogWeight(3)); CHECK_EQ(LogWeight(2), w.Value(0)); CHECK_EQ(LogWeight(3), w.Value(1)); } void TestSparsePowerWeightGetSetValue() { const LogWeight default_value(17); SparsePowerWeight<LogWeight> w; w.SetDefaultValue(default_value); // All gets should be the default. CHECK_EQ(default_value, w.Value(0)); CHECK_EQ(default_value, w.Value(100)); // First set should fill first_. w.SetValue(10, LogWeight(10)); CHECK_EQ(LogWeight(10), w.Value(10)); w.SetValue(10, LogWeight(20)); CHECK_EQ(LogWeight(20), w.Value(10)); // Add a smaller index. w.SetValue(5, LogWeight(5)); CHECK_EQ(LogWeight(5), w.Value(5)); CHECK_EQ(LogWeight(20), w.Value(10)); // Add some larger indices. w.SetValue(30, LogWeight(30)); CHECK_EQ(LogWeight(5), w.Value(5)); CHECK_EQ(LogWeight(20), w.Value(10)); CHECK_EQ(LogWeight(30), w.Value(30)); w.SetValue(29, LogWeight(29)); CHECK_EQ(LogWeight(5), w.Value(5)); CHECK_EQ(LogWeight(20), w.Value(10)); CHECK_EQ(LogWeight(29), w.Value(29)); CHECK_EQ(LogWeight(30), w.Value(30)); w.SetValue(31, LogWeight(31)); CHECK_EQ(LogWeight(5), w.Value(5)); CHECK_EQ(LogWeight(20), w.Value(10)); CHECK_EQ(LogWeight(29), w.Value(29)); CHECK_EQ(LogWeight(30), w.Value(30)); CHECK_EQ(LogWeight(31), w.Value(31)); // Replace a value. w.SetValue(30, LogWeight(60)); CHECK_EQ(LogWeight(60), w.Value(30)); // Replace a value with the default. CHECK_EQ(5, w.Size()); w.SetValue(30, default_value); CHECK_EQ(default_value, w.Value(30)); CHECK_EQ(4, w.Size()); // Replace lowest index by the default value. w.SetValue(5, default_value); CHECK_EQ(default_value, w.Value(5)); CHECK_EQ(3, w.Size()); // Clear out everything. 
w.SetValue(31, default_value); w.SetValue(29, default_value); w.SetValue(10, default_value); CHECK_EQ(0, w.Size()); CHECK_EQ(default_value, w.Value(5)); CHECK_EQ(default_value, w.Value(10)); CHECK_EQ(default_value, w.Value(29)); CHECK_EQ(default_value, w.Value(30)); CHECK_EQ(default_value, w.Value(31)); } } // namespace int main(int argc, char **argv) { std::set_new_handler(FailedNewHandler); SET_FLAGS(argv[0], &argc, &argv, true); LOG(INFO) << "Seed = " << FLAGS_seed; srand(FLAGS_seed); TestTemplatedWeights<float>(FLAGS_repeat); TestTemplatedWeights<double>(FLAGS_repeat); FLAGS_fst_weight_parentheses = "()"; TestTemplatedWeights<float>(FLAGS_repeat); TestTemplatedWeights<double>(FLAGS_repeat); FLAGS_fst_weight_parentheses = ""; // Makes sure type names for templated weights are consistent. CHECK(TropicalWeight::Type() == "tropical"); CHECK(TropicalWeightTpl<double>::Type() != TropicalWeightTpl<float>::Type()); CHECK(LogWeight::Type() == "log"); CHECK(LogWeightTpl<double>::Type() != LogWeightTpl<float>::Type()); TropicalWeightTpl<double> w(2.0); TropicalWeight tw(2.0); TestAdder<TropicalWeight>(1000); TestAdder<LogWeight>(1000); TestSignedAdder<SignedLogWeight>(1000); TestImplicitConversion<LogWeight>(); TestImplicitConversion<TropicalWeight>(); TestImplicitConversion<MinMaxWeight>(); TestWeightConversion<TropicalWeight, LogWeight>(2.0); using LeftStringWeight = StringWeight<int>; using LeftStringWeightGenerate = WeightGenerate<LeftStringWeight>; LeftStringWeightGenerate left_string_generate; WeightTester<LeftStringWeight, LeftStringWeightGenerate> left_string_tester( left_string_generate); left_string_tester.Test(FLAGS_repeat); using RightStringWeight = StringWeight<int, STRING_RIGHT>; using RightStringWeightGenerate = WeightGenerate<RightStringWeight>; RightStringWeightGenerate right_string_generate; WeightTester<RightStringWeight, RightStringWeightGenerate> right_string_tester(right_string_generate); right_string_tester.Test(FLAGS_repeat); // STRING_RESTRICT not tested since it requires equal strings, // so would fail. using IUSetWeight = SetWeight<int, SET_INTERSECT_UNION>; using IUSetWeightGenerate = WeightGenerate<IUSetWeight>; IUSetWeightGenerate iu_set_generate; WeightTester<IUSetWeight, IUSetWeightGenerate> iu_set_tester(iu_set_generate); iu_set_tester.Test(FLAGS_repeat); using UISetWeight = SetWeight<int, SET_UNION_INTERSECT>; using UISetWeightGenerate = WeightGenerate<UISetWeight>; UISetWeightGenerate ui_set_generate; WeightTester<UISetWeight, UISetWeightGenerate> ui_set_tester(ui_set_generate); ui_set_tester.Test(FLAGS_repeat); // SET_INTERSECT_UNION_RESTRICT not tested since it requires equal sets, // so would fail. 
using BoolSetWeight = SetWeight<int, SET_BOOLEAN>; using BoolSetWeightGenerate = WeightGenerate<BoolSetWeight>; BoolSetWeightGenerate bool_set_generate; WeightTester<BoolSetWeight, BoolSetWeightGenerate> bool_set_tester(bool_set_generate); bool_set_tester.Test(FLAGS_repeat); TestWeightConversion<IUSetWeight, UISetWeight>(iu_set_generate()); TestWeightCopy<IUSetWeight, UISetWeight>(iu_set_generate()); TestWeightCopy<IUSetWeight, BoolSetWeight>(iu_set_generate()); TestWeightCopy<UISetWeight, IUSetWeight>(ui_set_generate()); TestWeightCopy<UISetWeight, BoolSetWeight>(ui_set_generate()); TestWeightCopy<BoolSetWeight, IUSetWeight>(bool_set_generate()); TestWeightCopy<BoolSetWeight, UISetWeight>(bool_set_generate()); TestWeightMove<IUSetWeight, UISetWeight>(iu_set_generate()); TestWeightMove<IUSetWeight, BoolSetWeight>(iu_set_generate()); TestWeightMove<UISetWeight, IUSetWeight>(ui_set_generate()); TestWeightMove<UISetWeight, BoolSetWeight>(ui_set_generate()); TestWeightMove<BoolSetWeight, IUSetWeight>(bool_set_generate()); TestWeightMove<BoolSetWeight, UISetWeight>(bool_set_generate()); // COMPOSITE WEIGHTS AND TESTERS - DEFINITIONS using TropicalGallicWeight = GallicWeight<int, TropicalWeight>; using TropicalGallicWeightGenerate = WeightGenerate<TropicalGallicWeight>; TropicalGallicWeightGenerate tropical_gallic_generate(true); WeightTester<TropicalGallicWeight, TropicalGallicWeightGenerate> tropical_gallic_tester(tropical_gallic_generate); using TropicalGenGallicWeight = GallicWeight<int, TropicalWeight, GALLIC>; using TropicalGenGallicWeightGenerate = WeightGenerate<TropicalGenGallicWeight>; TropicalGenGallicWeightGenerate tropical_gen_gallic_generate(false); WeightTester<TropicalGenGallicWeight, TropicalGenGallicWeightGenerate> tropical_gen_gallic_tester(tropical_gen_gallic_generate); using TropicalProductWeight = ProductWeight<TropicalWeight, TropicalWeight>; using TropicalProductWeightGenerate = WeightGenerate<TropicalProductWeight>; TropicalProductWeightGenerate tropical_product_generate; WeightTester<TropicalProductWeight, TropicalProductWeightGenerate> tropical_product_tester(tropical_product_generate); using TropicalLexicographicWeight = LexicographicWeight<TropicalWeight, TropicalWeight>; using TropicalLexicographicWeightGenerate = WeightGenerate<TropicalLexicographicWeight>; TropicalLexicographicWeightGenerate tropical_lexicographic_generate; WeightTester<TropicalLexicographicWeight, TropicalLexicographicWeightGenerate> tropical_lexicographic_tester(tropical_lexicographic_generate); using TropicalCubeWeight = PowerWeight<TropicalWeight, 3>; using TropicalCubeWeightGenerate = WeightGenerate<TropicalCubeWeight>; TropicalCubeWeightGenerate tropical_cube_generate; WeightTester<TropicalCubeWeight, TropicalCubeWeightGenerate> tropical_cube_tester(tropical_cube_generate); using FirstNestedProductWeight = ProductWeight<TropicalProductWeight, TropicalWeight>; using FirstNestedProductWeightGenerate = WeightGenerate<FirstNestedProductWeight>; FirstNestedProductWeightGenerate first_nested_product_generate; WeightTester<FirstNestedProductWeight, FirstNestedProductWeightGenerate> first_nested_product_tester(first_nested_product_generate); using SecondNestedProductWeight = ProductWeight<TropicalWeight, TropicalProductWeight>; using SecondNestedProductWeightGenerate = WeightGenerate<SecondNestedProductWeight>; SecondNestedProductWeightGenerate second_nested_product_generate; WeightTester<SecondNestedProductWeight, SecondNestedProductWeightGenerate> 
second_nested_product_tester(second_nested_product_generate); using NestedProductCubeWeight = PowerWeight<FirstNestedProductWeight, 3>; using NestedProductCubeWeightGenerate = WeightGenerate<NestedProductCubeWeight>; NestedProductCubeWeightGenerate nested_product_cube_generate; WeightTester<NestedProductCubeWeight, NestedProductCubeWeightGenerate> nested_product_cube_tester(nested_product_cube_generate); using SparseNestedProductCubeWeight = SparsePowerWeight<NestedProductCubeWeight, size_t>; using SparseNestedProductCubeWeightGenerate = WeightGenerate<SparseNestedProductCubeWeight>; SparseNestedProductCubeWeightGenerate sparse_nested_product_cube_generate; WeightTester<SparseNestedProductCubeWeight, SparseNestedProductCubeWeightGenerate> sparse_nested_product_cube_tester(sparse_nested_product_cube_generate); using LogSparsePowerWeight = SparsePowerWeight<LogWeight, size_t>; using LogSparsePowerWeightGenerate = WeightGenerate<LogSparsePowerWeight>; LogSparsePowerWeightGenerate log_sparse_power_generate; WeightTester<LogSparsePowerWeight, LogSparsePowerWeightGenerate> log_sparse_power_tester(log_sparse_power_generate); using LogLogExpectationWeight = ExpectationWeight<LogWeight, LogWeight>; using LogLogExpectationWeightGenerate = WeightGenerate<LogLogExpectationWeight>; LogLogExpectationWeightGenerate log_log_expectation_generate; WeightTester<LogLogExpectationWeight, LogLogExpectationWeightGenerate> log_log_expectation_tester(log_log_expectation_generate); using LogLogSparseExpectationWeight = ExpectationWeight<LogWeight, LogSparsePowerWeight>; using LogLogSparseExpectationWeightGenerate = WeightGenerate<LogLogSparseExpectationWeight>; LogLogSparseExpectationWeightGenerate log_log_sparse_expectation_generate; WeightTester<LogLogSparseExpectationWeight, LogLogSparseExpectationWeightGenerate> log_log_sparse_expectation_tester(log_log_sparse_expectation_generate); struct UnionWeightOptions { using Compare = NaturalLess<TropicalWeight>; struct Merge { TropicalWeight operator()(const TropicalWeight &w1, const TropicalWeight &w2) const { return w1; } }; using ReverseOptions = UnionWeightOptions; }; using TropicalUnionWeight = UnionWeight<TropicalWeight, UnionWeightOptions>; using TropicalUnionWeightGenerate = WeightGenerate<TropicalUnionWeight>; TropicalUnionWeightGenerate tropical_union_generate; WeightTester<TropicalUnionWeight, TropicalUnionWeightGenerate> tropical_union_tester(tropical_union_generate); // COMPOSITE WEIGHTS AND TESTERS - TESTING // Tests composite weight I/O with parentheses. FLAGS_fst_weight_parentheses = "()"; // Unnested composite. tropical_gallic_tester.Test(FLAGS_repeat); tropical_gen_gallic_tester.Test(FLAGS_repeat); tropical_product_tester.Test(FLAGS_repeat); tropical_lexicographic_tester.Test(FLAGS_repeat); tropical_cube_tester.Test(FLAGS_repeat); log_sparse_power_tester.Test(FLAGS_repeat); log_log_expectation_tester.Test(FLAGS_repeat, false); tropical_union_tester.Test(FLAGS_repeat, false); // Nested composite. first_nested_product_tester.Test(FLAGS_repeat); second_nested_product_tester.Test(5); nested_product_cube_tester.Test(FLAGS_repeat); sparse_nested_product_cube_tester.Test(FLAGS_repeat); log_log_sparse_expectation_tester.Test(FLAGS_repeat, false); // ... and tests composite weight I/O without parentheses. FLAGS_fst_weight_parentheses = ""; // Unnested composite. 
tropical_gallic_tester.Test(FLAGS_repeat); tropical_product_tester.Test(FLAGS_repeat); tropical_lexicographic_tester.Test(FLAGS_repeat); tropical_cube_tester.Test(FLAGS_repeat); log_sparse_power_tester.Test(FLAGS_repeat); log_log_expectation_tester.Test(FLAGS_repeat, false); tropical_union_tester.Test(FLAGS_repeat, false); // Nested composite. second_nested_product_tester.Test(FLAGS_repeat); log_log_sparse_expectation_tester.Test(FLAGS_repeat, false); TestPowerWeightGetSetValue(); TestSparsePowerWeightGetSetValue(); std::cout << "PASS" << std::endl; return 0; }
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/lm/word_index.hh
// Separate header because this is used often.
#ifndef LM_WORD_INDEX_H
#define LM_WORD_INDEX_H

#include <climits>

namespace lm {
typedef unsigned int WordIndex;
const WordIndex kMaxWordIndex = UINT_MAX;
const WordIndex kUNK = 0;
} // namespace lm

typedef lm::WordIndex LMWordIndex;

#endif
0
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/nodejs_mic_vad_streaming/package.json
{ "name": "nodejs_mic_vad_streaming", "version": "0.1.0", "private": true, "dependencies": { "STT": "^1.0.0", "mic": "^2.1.2", "node-vad": "^1.1.4", "speaker": "^0.5.1", "wav": "^1.0.2" } }
0
coqui_public_repos/STT-models/swahili-congo/twb
coqui_public_repos/STT-models/swahili-congo/twb/v0.3.0/alphabet.txt
a b c d e f g h i j k l m n o p q r s t u v w x y z
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_10x-darwin-amd64-opt.yml
build:
  template_file: test-darwin-opt-base.tyml
  dependencies:
    - "darwin-amd64-cpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
    - "homebrew_tests-darwin-amd64"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup: >
    ${nodejs.brew.prep_10}
  args:
    tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-node-tests.sh 10.x 16k"
  metadata:
    name: "DeepSpeech OSX AMD64 CPU NodeJS 10.x tests"
    description: "Testing DeepSpeech for OSX/AMD64 on NodeJS v10.x, CPU only, optimized version"
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_13x_8k_multiarchpkg-linux-amd64-opt.yml
build:
  template_file: test-linux-opt-base.tyml
  docker_image: "ubuntu:16.04"
  dependencies:
    - "node-package-cpu"
    - "test-training_8k-linux-amd64-py36m-opt"
  test_model_task: "test-training_8k-linux-amd64-py36m-opt"
  system_setup: >
    ${nodejs.packages_xenial.prep_13} && ${nodejs.packages_xenial.apt_pinning}
    && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt}
  args:
    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests.sh 13.x 8k"
  workerType: "${docker.dsTests}"
  metadata:
    name: "DeepSpeech Linux AMD64 CPU NodeJS MultiArch Package 13.x tests (8kHz)"
    description: "Testing DeepSpeech for Linux/AMD64 on NodeJS MultiArch Package v13.x, CPU only, optimized version (8kHz)"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/script/Makefile.am
AM_CPPFLAGS = -I$(srcdir)/../include $(ICU_CPPFLAGS)

if HAVE_SCRIPT
lib_LTLIBRARIES = libfstscript.la
libfstscript_la_SOURCES = arciterator-class.cc arcsort.cc closure.cc \
compile.cc compose.cc concat.cc connect.cc convert.cc decode.cc \
determinize.cc difference.cc disambiguate.cc draw.cc encode.cc \
encodemapper-class.cc epsnormalize.cc equal.cc equivalent.cc fst-class.cc \
getters.cc info-impl.cc info.cc intersect.cc invert.cc isomorphic.cc map.cc \
minimize.cc print.cc project.cc prune.cc push.cc randequivalent.cc \
randgen.cc relabel.cc replace.cc reverse.cc reweight.cc rmepsilon.cc \
shortest-distance.cc shortest-path.cc stateiterator-class.cc synchronize.cc \
text-io.cc topsort.cc union.cc weight-class.cc verify.cc
libfstscript_la_LIBADD = ../lib/libfst.la -lm $(DL_LIBS)
libfstscript_la_LDFLAGS = -version-info 10:0:0
endif
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/verify.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/script/fst-class.h>
#include <fst/script/script-impl.h>
#include <fst/script/verify.h>

namespace fst {
namespace script {

bool Verify(const FstClass &fst) {
  VerifyArgs args(fst);
  Apply<Operation<VerifyArgs>>("Verify", fst.ArcType(), &args);
  return args.retval;
}

REGISTER_FST_OPERATION(Verify, StdArc, VerifyArgs);
REGISTER_FST_OPERATION(Verify, LogArc, VerifyArgs);
REGISTER_FST_OPERATION(Verify, Log64Arc, VerifyArgs);

}  // namespace script
}  // namespace fst
0
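For completeness, here is how a caller might use the scripting-level `Verify`; a sketch only, with a placeholder path.

```cpp
#include <memory>

#include <fst/script/fst-class.h>
#include <fst/script/verify.h>

int main() {
  std::unique_ptr<fst::script::FstClass> f(
      fst::script::FstClass::Read("a.fst"));  // placeholder path
  // Exit 0 iff the FST loaded and passed the sanity checks.
  return (f && fst::script::Verify(*f)) ? 0 : 1;
}
```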
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/lexicographic-weight.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Lexicographic weight set and associated semiring operation definitions.
//
// A lexicographic weight is a sequence of weights, each of which must have the
// path property and Times() must be (strongly) cancellative
// (for all a, b, c != Zero(): Times(c, a) = Times(c, b) => a = b,
// Times(a, c) = Times(b, c) => a = b).
// The + operation on two weights a and b is the lexicographically
// prior of a and b.

#ifndef FST_LEXICOGRAPHIC_WEIGHT_H_
#define FST_LEXICOGRAPHIC_WEIGHT_H_

#include <cstdlib>

#include <string>

#include <fst/log.h>

#include <fst/pair-weight.h>
#include <fst/weight.h>

namespace fst {

template <class W1, class W2>
class LexicographicWeight : public PairWeight<W1, W2> {
 public:
  using ReverseWeight = LexicographicWeight<typename W1::ReverseWeight,
                                            typename W2::ReverseWeight>;

  using PairWeight<W1, W2>::Value1;
  using PairWeight<W1, W2>::Value2;
  using PairWeight<W1, W2>::SetValue1;
  using PairWeight<W1, W2>::SetValue2;
  using PairWeight<W1, W2>::Zero;
  using PairWeight<W1, W2>::One;
  using PairWeight<W1, W2>::NoWeight;
  using PairWeight<W1, W2>::Quantize;
  using PairWeight<W1, W2>::Reverse;

  LexicographicWeight() {}

  explicit LexicographicWeight(const PairWeight<W1, W2> &w)
      : PairWeight<W1, W2>(w) {}

  LexicographicWeight(W1 w1, W2 w2) : PairWeight<W1, W2>(w1, w2) {
    if ((W1::Properties() & kPath) != kPath) {
      FSTERROR() << "LexicographicWeight must "
                 << "have the path property: " << W1::Type();
      SetValue1(W1::NoWeight());
    }
    if ((W2::Properties() & kPath) != kPath) {
      FSTERROR() << "LexicographicWeight must "
                 << "have the path property: " << W2::Type();
      SetValue2(W2::NoWeight());
    }
  }

  static const LexicographicWeight &Zero() {
    static const LexicographicWeight zero(PairWeight<W1, W2>::Zero());
    return zero;
  }

  static const LexicographicWeight &One() {
    static const LexicographicWeight one(PairWeight<W1, W2>::One());
    return one;
  }

  static const LexicographicWeight &NoWeight() {
    static const LexicographicWeight no_weight(PairWeight<W1, W2>::NoWeight());
    return no_weight;
  }

  static const string &Type() {
    static const string *const type =
        new string(W1::Type() + "_LT_" + W2::Type());
    return *type;
  }

  bool Member() const {
    if (!Value1().Member() || !Value2().Member()) return false;
    // Lexicographic weights cannot mix zeroes and non-zeroes.
    if (Value1() == W1::Zero() && Value2() == W2::Zero()) return true;
    if (Value1() != W1::Zero() && Value2() != W2::Zero()) return true;
    return false;
  }

  LexicographicWeight Quantize(float delta = kDelta) const {
    return LexicographicWeight(PairWeight<W1, W2>::Quantize(delta));
  }

  ReverseWeight Reverse() const {
    return ReverseWeight(PairWeight<W1, W2>::Reverse());
  }

  static constexpr uint64 Properties() {
    return W1::Properties() & W2::Properties() &
           (kLeftSemiring | kRightSemiring | kPath | kIdempotent |
            kCommutative);
  }
};

template <class W1, class W2>
inline LexicographicWeight<W1, W2> Plus(const LexicographicWeight<W1, W2> &w,
                                        const LexicographicWeight<W1, W2> &v) {
  if (!w.Member() || !v.Member()) {
    return LexicographicWeight<W1, W2>::NoWeight();
  }
  NaturalLess<W1> less1;
  NaturalLess<W2> less2;
  if (less1(w.Value1(), v.Value1())) return w;
  if (less1(v.Value1(), w.Value1())) return v;
  if (less2(w.Value2(), v.Value2())) return w;
  if (less2(v.Value2(), w.Value2())) return v;
  return w;
}

template <class W1, class W2>
inline LexicographicWeight<W1, W2> Times(const LexicographicWeight<W1, W2> &w,
                                         const LexicographicWeight<W1, W2> &v) {
  return LexicographicWeight<W1, W2>(Times(w.Value1(), v.Value1()),
                                     Times(w.Value2(), v.Value2()));
}

template <class W1, class W2>
inline LexicographicWeight<W1, W2> Divide(const LexicographicWeight<W1, W2> &w,
                                          const LexicographicWeight<W1, W2> &v,
                                          DivideType typ = DIVIDE_ANY) {
  return LexicographicWeight<W1, W2>(Divide(w.Value1(), v.Value1(), typ),
                                     Divide(w.Value2(), v.Value2(), typ));
}

// This function object generates weights by calling the underlying generators
// for the templated weight types, like all other pair weight types. However,
// for lexicographic weights, we cannot generate zeroes for the two subweights
// separately: weights are members iff both components are zero or both
// components are non-zero. This is intended primarily for testing.
template <class W1, class W2>
class WeightGenerate<LexicographicWeight<W1, W2>> {
 public:
  using Weight = LexicographicWeight<W1, W2>;
  using Generate1 = WeightGenerate<W1>;
  using Generate2 = WeightGenerate<W2>;

  explicit WeightGenerate(bool allow_zero = true,
                          size_t num_random_weights = kNumRandomWeights)
      : generator1_(false, num_random_weights),
        generator2_(false, num_random_weights),
        allow_zero_(allow_zero),
        num_random_weights_(num_random_weights) {}

  Weight operator()() const {
    if (allow_zero_) {
      const int n = rand() % (num_random_weights_ + 1);  // NOLINT
      if (n == num_random_weights_) return Weight(W1::Zero(), W2::Zero());
    }
    return Weight(generator1_(), generator2_());
  }

 private:
  const Generate1 generator1_;
  const Generate2 generator2_;
  // Permits Zero() and zero divisors.
  const bool allow_zero_;
  // The number of alternative random weights.
  const size_t num_random_weights_;
};

}  // namespace fst

#endif  // FST_LEXICOGRAPHIC_WEIGHT_H_
0
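A small sketch of the semiring defined above, using two tropical components; Plus() returns the lexicographically prior weight and Times() operates componentwise:

#include <iostream>
#include <fst/float-weight.h>
#include <fst/lexicographic-weight.h>

int main() {
  using LW = fst::LexicographicWeight<fst::TropicalWeight, fst::TropicalWeight>;
  const LW a(1.0, 5.0);
  const LW b(1.0, 3.0);
  // First components tie, so the second component decides: Plus(a, b) == b.
  std::cout << Plus(a, b) << std::endl;   // prints the pair, e.g. "1,3"
  // Times() is componentwise: (1+2, 5+4) in the tropical semiring.
  std::cout << Times(a, LW(2.0, 4.0)) << std::endl;
  return 0;
}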
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-generate_scorer-android-24-armv7-opt.yml
build:
  template_file: test-android-opt-base.tyml
  dependencies:
    - "android-armv7-cpu-opt"
    - "kenlm_android-armv7-cpu-opt"
    - "android-cache-armeabi-v7a-android-24"
  cache:
    url: ${system.android_cache.armeabi_v7a.android_24.url}
    namespace: ${system.android_cache.armeabi_v7a.android_24.namespace}
  args:
    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-scorer-tests.sh ${system.kenlm.android_armv7_cpu.url} android armeabi-v7a android-24"
  workerType: "${docker.dsTests}"
  metadata:
    name: "Testing DeepSpeech Android 7.0 ARMv7 CPU generate scorer"
    description: "Generate a DeepSpeech Scorer for Android 7.0/ARMv7, CPU only, optimized version"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/script/push.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/script/fst-class.h>
#include <fst/script/push.h>
#include <fst/script/script-impl.h>

namespace fst {
namespace script {

void Push(MutableFstClass *fst, ReweightType rew_type, float delta,
          bool remove_total_weight) {
  PushArgs1 args(fst, rew_type, delta, remove_total_weight);
  Apply<Operation<PushArgs1>>("Push", fst->ArcType(), &args);
}

void Push(const FstClass &ifst, MutableFstClass *ofst, uint32 flags,
          ReweightType rew_type, float delta) {
  if (!internal::ArcTypesMatch(ifst, *ofst, "Push")) {
    ofst->SetProperties(kError, kError);
    return;
  }
  PushArgs2 args(ifst, ofst, flags, rew_type, delta);
  Apply<Operation<PushArgs2>>("Push", ifst.ArcType(), &args);
}

REGISTER_FST_OPERATION(Push, StdArc, PushArgs1);
REGISTER_FST_OPERATION(Push, LogArc, PushArgs1);
REGISTER_FST_OPERATION(Push, Log64Arc, PushArgs1);
REGISTER_FST_OPERATION(Push, StdArc, PushArgs2);
REGISTER_FST_OPERATION(Push, LogArc, PushArgs2);
REGISTER_FST_OPERATION(Push, Log64Arc, PushArgs2);

}  // namespace script
}  // namespace fst
0
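A hedged sketch of driving the second Push() overload above from C++; the file names are placeholders:

#include <memory>
#include <fst/script/fst-class.h>
#include <fst/script/push.h>

int main() {
  std::unique_ptr<fst::script::FstClass> ifst(
      fst::script::FstClass::Read("in.fst"));
  if (!ifst) return 1;
  // The output FST must share the input's arc type for ArcTypesMatch() to pass.
  fst::script::VectorFstClass ofst(ifst->ArcType());
  // Push weights toward the initial state (kPushWeights is from fst/push.h).
  fst::script::Push(*ifst, &ofst, fst::kPushWeights, fst::REWEIGHT_TO_INITIAL,
                    fst::kDelta);
  return ofst.Write("out.fst") ? 0 : 1;
}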
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/util/string_piece_hash.hh
#ifndef UTIL_STRING_PIECE_HASH_H
#define UTIL_STRING_PIECE_HASH_H

#include "util/string_piece.hh"

#include <boost/functional/hash.hpp>
#include <boost/version.hpp>

inline size_t hash_value(const StringPiece &str) {
  return boost::hash_range(str.data(), str.data() + str.length());
}

/* Support for lookup of StringPiece in boost::unordered_map<std::string> */
struct StringPieceCompatibleHash : public std::unary_function<const StringPiece &, size_t> {
  size_t operator()(const StringPiece &str) const {
    return hash_value(str);
  }
};

struct StringPieceCompatibleEquals : public std::binary_function<const StringPiece &, const std::string &, bool> {
  bool operator()(const StringPiece &first, const StringPiece &second) const {
    return first == second;
  }
};

template <class T> typename T::const_iterator FindStringPiece(const T &t, const StringPiece &key) {
#if BOOST_VERSION < 104200
  std::string temp(key.data(), key.size());
  return t.find(temp);
#else
  return t.find(key, StringPieceCompatibleHash(), StringPieceCompatibleEquals());
#endif
}

template <class T> typename T::iterator FindStringPiece(T &t, const StringPiece &key) {
#if BOOST_VERSION < 104200
  std::string temp(key.data(), key.size());
  return t.find(temp);
#else
  return t.find(key, StringPieceCompatibleHash(), StringPieceCompatibleEquals());
#endif
}

#endif // UTIL_STRING_PIECE_HASH_H
0
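A sketch of the lookup helper above against a boost::unordered_map keyed by std::string; on Boost >= 1.42 it probes via the heterogeneous find() overload and avoids the temporary string copy:

#include <iostream>
#include <string>
#include <boost/unordered_map.hpp>
#include "util/string_piece.hh"
#include "util/string_piece_hash.hh"

int main() {
  boost::unordered_map<std::string, int> counts;
  counts["hello"] = 3;
  // Probe with a StringPiece; no std::string is constructed on newer Boost.
  StringPiece key("hello");
  auto it = FindStringPiece(counts, key);
  if (it != counts.end()) std::cout << it->second << std::endl;
  return 0;
}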
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/kenlm/MANIFEST.in
# file GENERATED by distutils, do NOT edit
include setup.py
include lm/*.cc
include lm/*.hh
include python/*.cpp
include util/*.cc
include util/*.hh
include util/double-conversion/*.cc
include util/double-conversion/*.h
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_10x-armbian-arm64-opt.yml
build:
  template_file: test-armbian-opt-base.tyml
  dependencies:
    - "linux-arm64-cpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
    ${nodejs.packages_buster.prep_10} && ${nodejs.packages_buster.apt_pinning}
    && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_buster.apt}
  args:
    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node_tflite-tests.sh 10.x 16k"
  metadata:
    name: "DeepSpeech ARMbian ARM64 Cortex-A53 CPU NodeJS 10.x tests"
    description: "Testing DeepSpeech for ARMbian ARM64 Cortex-A53 on NodeJS v10.x, CPU only, optimized version"
0
coqui_public_repos/TTS/TTS
coqui_public_repos/TTS/TTS/config/shared_configs.py
from dataclasses import asdict, dataclass
from typing import List

from coqpit import Coqpit, check_argument
from trainer import TrainerConfig


@dataclass
class BaseAudioConfig(Coqpit):
    """Base config to define audio processing parameters. It is used to initialize
    ```TTS.utils.audio.AudioProcessor```.

    Args:
        fft_size (int): Number of STFT frequency levels, aka the size of the linear spectrogram frame. Defaults to 1024.
        win_length (int): Each frame of audio is windowed by a window of length ```win_length``` and then padded with zeros to match ```fft_size```. Defaults to 1024.
        hop_length (int): Number of audio samples between adjacent STFT columns. Defaults to 1024.
        frame_shift_ms (int): Set ```hop_length``` based on milliseconds and sampling rate.
        frame_length_ms (int): Set ```win_length``` based on milliseconds and sampling rate.
        stft_pad_mode (str): Padding method used in STFT. 'reflect' or 'center'. Defaults to 'reflect'.
        sample_rate (int): Audio sampling rate. Defaults to 22050.
        resample (bool): Enable / Disable resampling audio to ```sample_rate```. Defaults to ```False```.
        preemphasis (float): Preemphasis coefficient. Defaults to 0.0.
        ref_level_db (int): Reference dB level to rebase the audio signal and ignore the level below. 20 dB is assumed the sound of air. Defaults to 20.
        do_sound_norm (bool): Enable / Disable sound normalization to reconcile the volume differences among samples. Defaults to False.
        log_func (str): Numpy log function used for amplitude to dB conversion. Defaults to 'np.log10'.
        do_trim_silence (bool): Enable / Disable trimming silences at the beginning and the end of the audio clip. Defaults to ```True```.
        do_amp_to_db_linear (bool, optional): Enable/disable amplitude to dB conversion of linear spectrograms. Defaults to True.
        do_amp_to_db_mel (bool, optional): Enable/disable amplitude to dB conversion of mel spectrograms. Defaults to True.
        pitch_fmax (float, optional): Maximum frequency of the F0 frames. Defaults to ```640```.
        pitch_fmin (float, optional): Minimum frequency of the F0 frames. Defaults to ```1```.
        trim_db (int): Silence threshold used for silence trimming. Defaults to 45.
        do_rms_norm (bool, optional): Enable/disable RMS volume normalization when loading an audio file. Defaults to False.
        db_level (int, optional): dB level used for RMS normalization. The range is -99 to 0. Defaults to None.
        power (float): Exponent used for expanding spectrogram levels before running Griffin-Lim. It helps to reduce the artifacts in the synthesized voice. Defaults to 1.5.
        griffin_lim_iters (int): Number of Griffin-Lim iterations. Defaults to 60.
        num_mels (int): Number of mel-basis filters defining the frequency dimension of each mel-spectrogram frame. Defaults to 80.
        mel_fmin (float): Min frequency level used for the mel-basis filters. ~50 for male and ~95 for female voices. It needs to be adjusted for a dataset. Defaults to 0.
        mel_fmax (float): Max frequency level used for the mel-basis filters. It needs to be adjusted for a dataset.
        spec_gain (int): Gain applied when converting amplitude to dB. Defaults to 20.
        signal_norm (bool): Enable/disable signal normalization. Defaults to True.
        min_level_db (int): Minimum dB threshold for the computed mel-spectrograms. Defaults to -100.
        symmetric_norm (bool): Enable/disable symmetric normalization. If set True, normalization is performed in the range [-k, k], else [0, k]. Defaults to True.
        max_norm (float): ```k``` defining the normalization range. Defaults to 4.0.
        clip_norm (bool): Enable/disable clipping the out-of-range values in the normalized audio signal. Defaults to True.
        stats_path (str): Path to the computed stats file. Defaults to None.
    """

    # stft parameters
    fft_size: int = 1024
    win_length: int = 1024
    hop_length: int = 256
    frame_shift_ms: int = None
    frame_length_ms: int = None
    stft_pad_mode: str = "reflect"
    # audio processing parameters
    sample_rate: int = 22050
    resample: bool = False
    preemphasis: float = 0.0
    ref_level_db: int = 20
    do_sound_norm: bool = False
    log_func: str = "np.log10"
    # silence trimming
    do_trim_silence: bool = True
    trim_db: int = 45
    # rms volume normalization
    do_rms_norm: bool = False
    db_level: float = None
    # griffin-lim params
    power: float = 1.5
    griffin_lim_iters: int = 60
    # mel-spec params
    num_mels: int = 80
    mel_fmin: float = 0.0
    mel_fmax: float = None
    spec_gain: int = 20
    do_amp_to_db_linear: bool = True
    do_amp_to_db_mel: bool = True
    # f0 params
    pitch_fmax: float = 640.0
    pitch_fmin: float = 1.0
    # normalization params
    signal_norm: bool = True
    min_level_db: int = -100
    symmetric_norm: bool = True
    max_norm: float = 4.0
    clip_norm: bool = True
    stats_path: str = None

    def check_values(
        self,
    ):
        """Check config fields"""
        c = asdict(self)
        check_argument("num_mels", c, restricted=True, min_val=10, max_val=2056)
        check_argument("fft_size", c, restricted=True, min_val=128, max_val=4058)
        check_argument("sample_rate", c, restricted=True, min_val=512, max_val=100000)
        check_argument(
            "frame_length_ms",
            c,
            restricted=True,
            min_val=10,
            max_val=1000,
            alternative="win_length",
        )
        check_argument("frame_shift_ms", c, restricted=True, min_val=1, max_val=1000, alternative="hop_length")
        check_argument("preemphasis", c, restricted=True, min_val=0, max_val=1)
        check_argument("min_level_db", c, restricted=True, min_val=-1000, max_val=10)
        check_argument("ref_level_db", c, restricted=True, min_val=0, max_val=1000)
        check_argument("power", c, restricted=True, min_val=1, max_val=5)
        check_argument("griffin_lim_iters", c, restricted=True, min_val=10, max_val=1000)
        # normalization parameters
        check_argument("signal_norm", c, restricted=True)
        check_argument("symmetric_norm", c, restricted=True)
        check_argument("max_norm", c, restricted=True, min_val=0.1, max_val=1000)
        check_argument("clip_norm", c, restricted=True)
        check_argument("mel_fmin", c, restricted=True, min_val=0.0, max_val=1000)
        check_argument("mel_fmax", c, restricted=True, min_val=500.0, allow_none=True)
        check_argument("spec_gain", c, restricted=True, min_val=1, max_val=100)
        check_argument("do_trim_silence", c, restricted=True)
        check_argument("trim_db", c, restricted=True)


@dataclass
class BaseDatasetConfig(Coqpit):
    """Base config for TTS datasets.

    Args:
        formatter (str): Formatter name that selects the formatter used in ```TTS.tts.datasets.formatter```. Defaults to `""`.
        dataset_name (str): Unique name for the dataset. Defaults to `""`.
        path (str): Root path to the dataset files. Defaults to `""`.
        meta_file_train (str): Name of the dataset meta file. Or a list of speakers to be ignored at training for multi-speaker datasets. Defaults to `""`.
        ignored_speakers (List): List of speaker IDs that are not used for training. Default None.
        language (str): Language code of the dataset. If defined, it overrides `phoneme_language`. Defaults to `""`.
        phonemizer (str): Phonemizer used for that dataset's language. By default it uses `DEF_LANG_TO_PHONEMIZER`. Defaults to `""`.
        meta_file_val (str): Name of the dataset meta file that defines the instances used at validation.
        meta_file_attn_mask (str): Path to the file that lists the attention mask files used with models that require attention masks to train the duration predictor.
    """

    formatter: str = ""
    dataset_name: str = ""
    path: str = ""
    meta_file_train: str = ""
    ignored_speakers: List[str] = None
    language: str = ""
    phonemizer: str = ""
    meta_file_val: str = ""
    meta_file_attn_mask: str = ""

    def check_values(
        self,
    ):
        """Check config fields"""
        c = asdict(self)
        check_argument("formatter", c, restricted=True)
        check_argument("path", c, restricted=True)
        check_argument("meta_file_train", c, restricted=True)
        check_argument("meta_file_val", c, restricted=False)
        check_argument("meta_file_attn_mask", c, restricted=False)


@dataclass
class BaseTrainingConfig(TrainerConfig):
    """Base config to define the basic 🐸TTS training parameters that are shared
    among all the models. It is based on ```Trainer.TrainingConfig```.

    Args:
        model (str): Name of the model that is used in the training.
        num_loader_workers (int): Number of workers for training time dataloader.
        num_eval_loader_workers (int): Number of workers for evaluation time dataloader.
    """

    model: str = None
    # dataloading
    num_loader_workers: int = 0
    num_eval_loader_workers: int = 0
    use_noise_augment: bool = False
0
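A brief, hedged usage sketch for the config classes above; the override values and dataset paths are illustrative placeholders, not recommendations:

from TTS.config.shared_configs import BaseAudioConfig, BaseDatasetConfig

# Start from the documented defaults and override a few audio fields.
audio_config = BaseAudioConfig(sample_rate=16000, trim_db=40, num_mels=80)
audio_config.check_values()  # validates fields against the documented ranges

# Point a dataset config at an LJSpeech-style corpus (paths are placeholders).
dataset_config = BaseDatasetConfig(
    formatter="ljspeech", path="/data/LJSpeech-1.1", meta_file_train="metadata.csv"
)
dataset_config.check_values()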
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/lock.h
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Google-compatibility locking declarations and inline definitions.

#ifndef FST_LIB_LOCK_H_
#define FST_LIB_LOCK_H_

#include <mutex>

namespace fst {

using namespace std;

class Mutex {
 public:
  Mutex() {}

  inline void Lock() { mu_.lock(); }

  inline void Unlock() { mu_.unlock(); }

 private:
  std::mutex mu_;

  Mutex(const Mutex &) = delete;
  Mutex &operator=(const Mutex &) = delete;
};

class MutexLock {
 public:
  explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }

  ~MutexLock() { mu_->Unlock(); }

 private:
  Mutex *mu_;

  MutexLock(const MutexLock &) = delete;
  MutexLock &operator=(const MutexLock &) = delete;
};

// Currently, we don't use a separate reader lock.
// TODO(kbg): Implement this with std::shared_mutex once C++17 becomes widely
// available.
using ReaderMutexLock = MutexLock;

}  // namespace fst

#endif  // FST_LIB_LOCK_H_
0
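A minimal sketch exercising the scoped-locking wrappers above; the counter and thread count are illustrative:

#include <iostream>
#include <thread>
#include <vector>
#include <fst/lock.h>

static fst::Mutex counter_mu;
static int counter = 0;

void Bump() {
  for (int i = 0; i < 1000; ++i) {
    fst::MutexLock lock(&counter_mu);  // unlocks when it leaves scope
    ++counter;
  }
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) threads.emplace_back(Bump);
  for (auto &t : threads) t.join();
  std::cout << counter << std::endl;  // always 4000 under the lock
  return 0;
}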
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-electronjs_v10.0-darwin-amd64-opt.yml
build:
  template_file: test-darwin-opt-base.tyml
  dependencies:
    - "darwin-amd64-cpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
    - "homebrew_tests-darwin-amd64"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
    ${nodejs.brew.prep_12}
  args:
    tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 10.0.0 16k"
  metadata:
    name: "DeepSpeech OSX AMD64 CPU ElectronJS v10.0 tests"
    description: "Testing DeepSpeech for OSX/AMD64 on ElectronJS v10.0, CPU only, optimized version"
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/lm/binary_format.hh
#ifndef LM_BINARY_FORMAT_H
#define LM_BINARY_FORMAT_H

#include "lm/config.hh"
#include "lm/model_type.hh"
#include "lm/read_arpa.hh"
#include "util/file_piece.hh"
#include "util/mmap.hh"
#include "util/scoped.hh"

#include <cstddef>
#include <vector>

#include <stdint.h>

namespace lm {
namespace ngram {

extern const char *kModelNames[6];

/* Inspect a file to determine if it is a binary lm.  If not, return false.
 * If so, return true and set recognized to the type.  This is the only API in
 * this header designed for use by decoder authors.
 */
bool RecognizeBinary(const char *file, ModelType &recognized);

struct FixedWidthParameters {
  unsigned char order;
  float probing_multiplier;
  // What type of model is this?
  ModelType model_type;
  // Does the end of the file have the actual strings in the vocabulary?
  bool has_vocabulary;
  unsigned int search_version;
};

// This is a macro instead of an inline function so constants can be assigned using it.
#define ALIGN8(a) ((std::ptrdiff_t(((a)-1)/8)+1)*8)

// Parameters stored in the header of a binary file.
struct Parameters {
  FixedWidthParameters fixed;
  std::vector<uint64_t> counts;
};

class BinaryFormat {
  public:
    explicit BinaryFormat(const Config &config);

    // Reading a binary file:
    // Takes ownership of fd
    void InitializeBinary(int fd, ModelType model_type, unsigned int search_version, Parameters &params);
    // Used to read parts of the file to update the config object before figuring out full size.
    void ReadForConfig(void *to, std::size_t amount, uint64_t offset_excluding_header) const;
    // Actually load the binary file and return a pointer to the beginning of the search area.
    void *LoadBinary(std::size_t size);

    uint64_t VocabStringReadingOffset() const {
      assert(vocab_string_offset_ != kInvalidOffset);
      return vocab_string_offset_;
    }

    // Writing a binary file or initializing in RAM from ARPA:
    // Size for vocabulary.
    void *SetupJustVocab(std::size_t memory_size, uint8_t order);
    // Warning: can change the vocabulary base pointer.
    void *GrowForSearch(std::size_t memory_size, std::size_t vocab_pad, void *&vocab_base);
    // Warning: can change vocabulary and search base addresses.
    void WriteVocabWords(const std::string &buffer, void *&vocab_base, void *&search_base);
    // Write the header at the beginning of the file.
    void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts);

  private:
    void MapFile(void *&vocab_base, void *&search_base);

    // Copied from configuration.
    const Config::WriteMethod write_method_;
    const char *write_mmap_;
    util::LoadMethod load_method_;

    // File behind memory, if any.
    util::scoped_fd file_;

    // If there is a file involved, a single mapping.
    util::scoped_memory mapping_;

    // If the data is only in memory, separately allocate each because the trie
    // knows vocab's size before it knows search's size (because SRILM might
    // have pruned).
    util::scoped_memory memory_vocab_, memory_search_;

    // Memory ranges.  Note that these may not be contiguous and may not all
    // exist.
    std::size_t header_size_, vocab_size_, vocab_pad_;
    // aka end of search.
    uint64_t vocab_string_offset_;

    static const uint64_t kInvalidOffset = (uint64_t)-1;
};

bool IsBinaryFormat(int fd);

} // namespace ngram
} // namespace lm

#endif // LM_BINARY_FORMAT_H
0
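As noted above, RecognizeBinary() is the decoder-facing entry point of this header. A hedged sketch; the model file name is a placeholder:

#include <iostream>
#include "lm/binary_format.hh"

int main() {
  lm::ngram::ModelType model_type;
  // Returns false for ARPA/text files, true (and sets model_type) for binary.
  if (lm::ngram::RecognizeBinary("lm.binary", model_type)) {
    std::cout << "binary model: " << lm::ngram::kModelNames[model_type]
              << std::endl;
  } else {
    std::cout << "not a KenLM binary file" << std::endl;
  }
  return 0;
}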
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/getters.cc
#include <fst/script/getters.h>

namespace fst {
namespace script {

bool GetArcSortType(const string &str, ArcSortType *sort_type) {
  if (str == "ilabel") {
    *sort_type = ILABEL_SORT;
  } else if (str == "olabel") {
    *sort_type = OLABEL_SORT;
  } else {
    return false;
  }
  return true;
}

bool GetComposeFilter(const string &str, ComposeFilter *compose_filter) {
  if (str == "alt_sequence") {
    *compose_filter = ALT_SEQUENCE_FILTER;
  } else if (str == "auto") {
    *compose_filter = AUTO_FILTER;
  } else if (str == "match") {
    *compose_filter = MATCH_FILTER;
  } else if (str == "null") {
    *compose_filter = NULL_FILTER;
  } else if (str == "sequence") {
    *compose_filter = SEQUENCE_FILTER;
  } else if (str == "trivial") {
    *compose_filter = TRIVIAL_FILTER;
  } else {
    return false;
  }
  return true;
}

bool GetDeterminizeType(const string &str, DeterminizeType *det_type) {
  if (str == "functional") {
    *det_type = DETERMINIZE_FUNCTIONAL;
  } else if (str == "nonfunctional") {
    *det_type = DETERMINIZE_NONFUNCTIONAL;
  } else if (str == "disambiguate") {
    *det_type = DETERMINIZE_DISAMBIGUATE;
  } else {
    return false;
  }
  return true;
}

bool GetMapType(const string &str, MapType *map_type) {
  if (str == "arc_sum") {
    *map_type = ARC_SUM_MAPPER;
  } else if (str == "arc_unique") {
    *map_type = ARC_UNIQUE_MAPPER;
  } else if (str == "identity") {
    *map_type = IDENTITY_MAPPER;
  } else if (str == "input_epsilon") {
    *map_type = INPUT_EPSILON_MAPPER;
  } else if (str == "invert") {
    *map_type = INVERT_MAPPER;
  } else if (str == "output_epsilon") {
    *map_type = OUTPUT_EPSILON_MAPPER;
  } else if (str == "plus") {
    *map_type = PLUS_MAPPER;
  } else if (str == "power") {
    *map_type = POWER_MAPPER;
  } else if (str == "quantize") {
    *map_type = QUANTIZE_MAPPER;
  } else if (str == "rmweight") {
    *map_type = RMWEIGHT_MAPPER;
  } else if (str == "superfinal") {
    *map_type = SUPERFINAL_MAPPER;
  } else if (str == "times") {
    *map_type = TIMES_MAPPER;
  } else if (str == "to_log") {
    *map_type = TO_LOG_MAPPER;
  } else if (str == "to_log64") {
    *map_type = TO_LOG64_MAPPER;
  } else if (str == "to_std" || str == "to_standard") {
    *map_type = TO_STD_MAPPER;
  } else {
    return false;
  }
  return true;
}

bool GetRandArcSelection(const string &str, RandArcSelection *ras) {
  if (str == "uniform") {
    *ras = UNIFORM_ARC_SELECTOR;
  } else if (str == "log_prob") {
    *ras = LOG_PROB_ARC_SELECTOR;
  } else if (str == "fast_log_prob") {
    *ras = FAST_LOG_PROB_ARC_SELECTOR;
  } else {
    return false;
  }
  return true;
}

bool GetQueueType(const string &str, QueueType *queue_type) {
  if (str == "auto") {
    *queue_type = AUTO_QUEUE;
  } else if (str == "fifo") {
    *queue_type = FIFO_QUEUE;
  } else if (str == "lifo") {
    *queue_type = LIFO_QUEUE;
  } else if (str == "shortest") {
    *queue_type = SHORTEST_FIRST_QUEUE;
  } else if (str == "state") {
    *queue_type = STATE_ORDER_QUEUE;
  } else if (str == "top") {
    *queue_type = TOP_ORDER_QUEUE;
  } else {
    return false;
  }
  return true;
}

bool GetReplaceLabelType(const string &str, bool epsilon_on_replace,
                         ReplaceLabelType *rlt) {
  if (epsilon_on_replace || str == "neither") {
    *rlt = REPLACE_LABEL_NEITHER;
  } else if (str == "input") {
    *rlt = REPLACE_LABEL_INPUT;
  } else if (str == "output") {
    *rlt = REPLACE_LABEL_OUTPUT;
  } else if (str == "both") {
    *rlt = REPLACE_LABEL_BOTH;
  } else {
    return false;
  }
  return true;
}

}  // namespace script
}  // namespace fst
0
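These getters back the flag parsing of the command-line binaries; a small sketch of translating flag strings into enums (the flag values are illustrative):

#include <iostream>
#include <string>
#include <fst/script/getters.h>

int main(int argc, char **argv) {
  // Translate flag strings (e.g. --sort_type=ilabel, --queue_type=fifo).
  const std::string sort_flag = argc > 1 ? argv[1] : "ilabel";
  fst::script::ArcSortType sort_type;
  if (!fst::script::GetArcSortType(sort_flag, &sort_type)) {
    std::cerr << "unknown sort type: " << sort_flag << std::endl;
    return 1;
  }
  fst::QueueType queue_type;
  if (!fst::script::GetQueueType("fifo", &queue_type)) return 1;
  std::cout << "flags parsed" << std::endl;
  return 0;
}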
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/compact-fst.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// FST Class for memory-efficient representation of common types of
// FSTs: linear automata, acceptors, unweighted FSTs, ...

#ifndef FST_COMPACT_FST_H_
#define FST_COMPACT_FST_H_

#include <climits>
#include <iterator>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

#include <fst/log.h>

#include <fst/cache.h>
#include <fst/expanded-fst.h>
#include <fst/fst-decl.h>  // For optional argument declarations
#include <fst/mapped-file.h>
#include <fst/matcher.h>
#include <fst/test-properties.h>
#include <fst/util.h>

namespace fst {

struct CompactFstOptions : public CacheOptions {
  // The default caching behaviour is to do no caching. Most compactors are
  // cheap and therefore we save memory by not doing caching.
  CompactFstOptions() : CacheOptions(true, 0) {}

  explicit CompactFstOptions(const CacheOptions &opts) : CacheOptions(opts) {}
};

// New upcoming (Fst) Compactor interface - currently used internally
// by CompactFstImpl.
//
// class Compactor {
//  public:
//   // Constructor from the Fst to be compacted.
//   Compactor(const Fst<Arc> &fst, ...);
//   // Copy constructor
//   Compactor(const Compactor &compactor, bool safe = false)
//   // Default constructor (optional, see comment below).
//   Compactor();
//
//   // Returns the start state, number of states, and total number of arcs
//   // of the compacted Fst
//   StateId Start() const;
//   StateId NumStates() const;
//   size_t NumArcs() const;
//
//   // Accessor class for state attributes.
//   class State {
//    public:
//     State();  // Required, corresponds to kNoStateId.
//     State(const Compactor *c, StateId);  // Accessor for StateId 's'.
//     StateId GetStateId() const;
//     Weight Final() const;
//     size_t NumArcs() const;
//     Arc GetArc(size_t i) const;
//   };
//
//   // Modifies 'state' accessor to provide access to state id 's'.
//   void SetState(StateId s, State *state);
//
//   // Tests whether 'fst' can be compacted by this compactor.
//   bool IsCompatible(const Fst<A> &fst) const;
//
//   // Returns the properties that are always true for an fst
//   // compacted using this compactor.
//   uint64 Properties() const;
//
//   // Returns a string identifying the type of compactor.
//   static const string &Type();
//
//   // Returns true if an error has occurred.
//   bool Error() const;
//
//   // Writes a compactor to a file.
//   bool Write(std::ostream &strm, const FstWriteOptions &opts) const;
//
//   // Reads a compactor from a file.
//   static Compactor *Read(std::istream &strm, const FstReadOptions &opts,
//                          const FstHeader &hdr);
// };
//
// Old (Arc) Compactor Interface:
//
// The ArcCompactor class determines how arcs and final weights are compacted
// and expanded.
//
// Final weights are treated as transitions to the superfinal state, i.e.,
// ilabel = olabel = kNoLabel and nextstate = kNoStateId.
//
// There are two types of compactors:
//
// * Fixed out-degree compactors: 'compactor.Size()' returns a positive integer
//   's'. An FST can be compacted by this compactor only if each state has
//   exactly 's' outgoing transitions (counting a non-Zero() final weight as a
//   transition). A typical example is a compactor for string FSTs, i.e.,
//   's == 1'.
//
// * Variable out-degree compactors: 'compactor.Size() == -1'. There are no
//   out-degree restrictions for these compactors.
//
// Interface:
//
// class ArcCompactor {
//  public:
//   // Element is the type of the compacted transitions.
//   using Element = ...
//
//   // Returns the compacted representation of a transition 'arc'
//   // at a state 's'.
//   Element Compact(StateId s, const Arc &arc);
//
//   // Returns the transition at state 's' represented by the compacted
//   // transition 'e'.
//   Arc Expand(StateId s, const Element &e) const;
//
//   // Returns -1 for variable out-degree compactors, and the mandatory
//   // out-degree otherwise.
//   ssize_t Size() const;
//
//   // Tests whether an FST can be compacted by this compactor.
//   bool Compatible(const Fst<A> &fst) const;
//
//   // Returns the properties that are always true for an FST compacted using
//   // this compactor.
//   uint64 Properties() const;
//
//   // Returns a string identifying the type of compactor.
//   static const string &Type();
//
//   // Writes a compactor to a file.
//   bool Write(std::ostream &strm) const;
//
//   // Reads a compactor from a file.
//   static ArcCompactor *Read(std::istream &strm);
//
//   // Default constructor (optional, see comment below).
//   ArcCompactor();
// };
//
// The default constructor is only required for FST_REGISTER to work (i.e.,
// enabling Convert() and the command-line utilities to work with this new
// compactor). However, a default constructor always needs to be specified for
// this code to compile, but one can have it simply raise an error when called,
// like so:
//
// Compactor::Compactor() {
//   FSTERROR() << "Compactor: No default constructor";
// }

// Default implementation data for CompactFst, which can be shared between
// otherwise independent copies.
//
// The implementation contains two arrays: 'states_' and 'compacts_'.
//
// For fixed out-degree compactors, the 'states_' array is unallocated. The
// 'compacts_' contains the compacted transitions. Its size is 'ncompacts_'.
// The outgoing transitions at a given state are stored consecutively. For a
// given state 's', its 'compactor.Size()' outgoing transitions (including
// superfinal transition when 's' is final), are stored in position
// ['s*compactor.Size()', '(s+1)*compactor.Size()').
//
// For variable out-degree compactors, the states_ array has size
// 'nstates_ + 1' and contains pointers to positions into 'compacts_'. For a
// given state 's', the compacted transitions of 's' are stored in positions
// ['states_[s]', 'states_[s + 1]') in 'compacts_'. By convention,
// 'states_[nstates_] == ncompacts_'.
//
// In both cases, the superfinal transitions (when 's' is final, i.e.,
// 'Final(s) != Weight::Zero()') are stored first.
//
// The unsigned type U is used to represent indices into the compacts_ array.
template <class Element, class Unsigned> class DefaultCompactStore { public: DefaultCompactStore() : states_(nullptr), compacts_(nullptr), nstates_(0), ncompacts_(0), narcs_(0), start_(kNoStateId), error_(false) {} template <class Arc, class Compactor> DefaultCompactStore(const Fst<Arc> &fst, const Compactor &compactor); template <class Iterator, class Compactor> DefaultCompactStore(const Iterator &begin, const Iterator &end, const Compactor &compactor); ~DefaultCompactStore() { if (!states_region_) delete[] states_; if (!compacts_region_) delete[] compacts_; } template <class Compactor> static DefaultCompactStore<Element, Unsigned> *Read( std::istream &strm, const FstReadOptions &opts, const FstHeader &hdr, const Compactor &compactor); bool Write(std::ostream &strm, const FstWriteOptions &opts) const; Unsigned States(ssize_t i) const { return states_[i]; } const Element &Compacts(size_t i) const { return compacts_[i]; } size_t NumStates() const { return nstates_; } size_t NumCompacts() const { return ncompacts_; } size_t NumArcs() const { return narcs_; } ssize_t Start() const { return start_; } bool Error() const { return error_; } // Returns a string identifying the type of data storage container. static const string &Type(); private: std::unique_ptr<MappedFile> states_region_; std::unique_ptr<MappedFile> compacts_region_; Unsigned *states_; Element *compacts_; size_t nstates_; size_t ncompacts_; size_t narcs_; ssize_t start_; bool error_; }; template <class Element, class Unsigned> template <class Arc, class Compactor> DefaultCompactStore<Element, Unsigned>::DefaultCompactStore( const Fst<Arc> &fst, const Compactor &compactor) : states_(nullptr), compacts_(nullptr), nstates_(0), ncompacts_(0), narcs_(0), start_(kNoStateId), error_(false) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; start_ = fst.Start(); // Counts # of states and arcs. 
StateId nfinals = 0; for (StateIterator<Fst<Arc>> siter(fst); !siter.Done(); siter.Next()) { ++nstates_; const auto s = siter.Value(); for (ArcIterator<Fst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) { ++narcs_; } if (fst.Final(s) != Weight::Zero()) ++nfinals; } if (compactor.Size() == -1) { states_ = new Unsigned[nstates_ + 1]; ncompacts_ = narcs_ + nfinals; compacts_ = new Element[ncompacts_]; states_[nstates_] = ncompacts_; } else { states_ = nullptr; ncompacts_ = nstates_ * compactor.Size(); if ((narcs_ + nfinals) != ncompacts_) { FSTERROR() << "DefaultCompactStore: Compactor incompatible with FST"; error_ = true; return; } compacts_ = new Element[ncompacts_]; } size_t pos = 0; size_t fpos = 0; for (size_t s = 0; s < nstates_; ++s) { fpos = pos; if (compactor.Size() == -1) states_[s] = pos; if (fst.Final(s) != Weight::Zero()) { compacts_[pos++] = compactor.Compact( s, Arc(kNoLabel, kNoLabel, fst.Final(s), kNoStateId)); } for (ArcIterator<Fst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) { compacts_[pos++] = compactor.Compact(s, aiter.Value()); } if ((compactor.Size() != -1) && ((pos - fpos) != compactor.Size())) { FSTERROR() << "DefaultCompactStore: Compactor incompatible with FST"; error_ = true; return; } } if (pos != ncompacts_) { FSTERROR() << "DefaultCompactStore: Compactor incompatible with FST"; error_ = true; return; } } template <class Element, class Unsigned> template <class Iterator, class Compactor> DefaultCompactStore<Element, Unsigned>::DefaultCompactStore( const Iterator &begin, const Iterator &end, const Compactor &compactor) : states_(nullptr), compacts_(nullptr), nstates_(0), ncompacts_(0), narcs_(0), start_(kNoStateId), error_(false) { using Arc = typename Compactor::Arc; using Weight = typename Arc::Weight; if (compactor.Size() != -1) { ncompacts_ = std::distance(begin, end); if (compactor.Size() == 1) { // For strings, allows implicit final weight. Empty input is the empty // string. if (ncompacts_ == 0) { ++ncompacts_; } else { const auto arc = compactor.Expand(ncompacts_ - 1, *(begin + (ncompacts_ - 1))); if (arc.ilabel != kNoLabel) ++ncompacts_; } } if (ncompacts_ % compactor.Size()) { FSTERROR() << "DefaultCompactStore: Size of input container incompatible" << " with compactor"; error_ = true; return; } if (ncompacts_ == 0) return; start_ = 0; nstates_ = ncompacts_ / compactor.Size(); compacts_ = new Element[ncompacts_]; size_t i = 0; Iterator it = begin; for (; it != end; ++it, ++i) { compacts_[i] = *it; if (compactor.Expand(i, *it).ilabel != kNoLabel) ++narcs_; } if (i < ncompacts_) { compacts_[i] = compactor.Compact( i, Arc(kNoLabel, kNoLabel, Weight::One(), kNoStateId)); } } else { if (std::distance(begin, end) == 0) return; // Count # of states, arcs and compacts. 
auto it = begin; for (size_t i = 0; it != end; ++it, ++i) { const auto arc = compactor.Expand(i, *it); if (arc.ilabel != kNoLabel) { ++narcs_; ++ncompacts_; } else { ++nstates_; if (arc.weight != Weight::Zero()) ++ncompacts_; } } start_ = 0; compacts_ = new Element[ncompacts_]; states_ = new Unsigned[nstates_ + 1]; states_[nstates_] = ncompacts_; size_t i = 0; size_t s = 0; for (it = begin; it != end; ++it) { const auto arc = compactor.Expand(i, *it); if (arc.ilabel != kNoLabel) { compacts_[i++] = *it; } else { states_[s++] = i; if (arc.weight != Weight::Zero()) compacts_[i++] = *it; } } if ((s != nstates_) || (i != ncompacts_)) { FSTERROR() << "DefaultCompactStore: Ill-formed input container"; error_ = true; return; } } } template <class Element, class Unsigned> template <class Compactor> DefaultCompactStore<Element, Unsigned> *DefaultCompactStore<Element, Unsigned>::Read(std::istream &strm, const FstReadOptions &opts, const FstHeader &hdr, const Compactor &compactor) { std::unique_ptr<DefaultCompactStore<Element, Unsigned>> data( new DefaultCompactStore<Element, Unsigned>()); data->start_ = hdr.Start(); data->nstates_ = hdr.NumStates(); data->narcs_ = hdr.NumArcs(); if (compactor.Size() == -1) { if ((hdr.GetFlags() & FstHeader::IS_ALIGNED) && !AlignInput(strm)) { LOG(ERROR) << "DefaultCompactStore::Read: Alignment failed: " << opts.source; return nullptr; } auto b = (data->nstates_ + 1) * sizeof(Unsigned); data->states_region_.reset(MappedFile::Map( &strm, opts.mode == FstReadOptions::MAP, opts.source, b)); if (!strm || !data->states_region_) { LOG(ERROR) << "DefaultCompactStore::Read: Read failed: " << opts.source; return nullptr; } data->states_ = static_cast<Unsigned *>(data->states_region_->mutable_data()); } else { data->states_ = nullptr; } data->ncompacts_ = compactor.Size() == -1 ? data->states_[data->nstates_] : data->nstates_ * compactor.Size(); if ((hdr.GetFlags() & FstHeader::IS_ALIGNED) && !AlignInput(strm)) { LOG(ERROR) << "DefaultCompactStore::Read: Alignment failed: " << opts.source; return nullptr; } size_t b = data->ncompacts_ * sizeof(Element); data->compacts_region_.reset( MappedFile::Map(&strm, opts.mode == FstReadOptions::MAP, opts.source, b)); if (!strm || !data->compacts_region_) { LOG(ERROR) << "DefaultCompactStore::Read: Read failed: " << opts.source; return nullptr; } data->compacts_ = static_cast<Element *>(data->compacts_region_->mutable_data()); return data.release(); } template <class Element, class Unsigned> bool DefaultCompactStore<Element, Unsigned>::Write( std::ostream &strm, const FstWriteOptions &opts) const { if (states_) { if (opts.align && !AlignOutput(strm)) { LOG(ERROR) << "DefaultCompactStore::Write: Alignment failed: " << opts.source; return false; } strm.write(reinterpret_cast<char *>(states_), (nstates_ + 1) * sizeof(Unsigned)); } if (opts.align && !AlignOutput(strm)) { LOG(ERROR) << "DefaultCompactStore::Write: Alignment failed: " << opts.source; return false; } strm.write(reinterpret_cast<char *>(compacts_), ncompacts_ * sizeof(Element)); strm.flush(); if (!strm) { LOG(ERROR) << "DefaultCompactStore::Write: Write failed: " << opts.source; return false; } return true; } template <class Element, class Unsigned> const string &DefaultCompactStore<Element, Unsigned>::Type() { static const string *const type = new string("compact"); return *type; } template <class C, class U, class S> class DefaultCompactState; // Wraps an arc compactor and a compact store as a new Fst compactor. 
template <class C, class U, class S = DefaultCompactStore<typename C::Element, U>> class DefaultCompactor { public: using ArcCompactor = C; using Unsigned = U; using CompactStore = S; using Element = typename C::Element; using Arc = typename C::Arc; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using State = DefaultCompactState<C, U, S>; friend State; DefaultCompactor() : arc_compactor_(nullptr), compact_store_(nullptr) {} // Constructs from Fst. DefaultCompactor(const Fst<Arc> &fst, std::shared_ptr<ArcCompactor> arc_compactor) : arc_compactor_(std::move(arc_compactor)), compact_store_(std::make_shared<S>(fst, *arc_compactor_)) {} DefaultCompactor(const Fst<Arc> &fst, std::shared_ptr<DefaultCompactor<C, U, S>> compactor) : arc_compactor_(compactor->arc_compactor_), compact_store_(compactor->compact_store_ == nullptr ? std::make_shared<S>(fst, *arc_compactor_) : compactor->compact_store_) {} // Constructs from CompactStore. DefaultCompactor(std::shared_ptr<ArcCompactor> arc_compactor, std::shared_ptr<CompactStore> compact_store) : arc_compactor_(std::move(arc_compactor)), compact_store_(std::move(compact_store)) {} // Constructs from set of compact elements (when arc_compactor.Size() != -1). template <class Iterator> DefaultCompactor(const Iterator &b, const Iterator &e, std::shared_ptr<C> arc_compactor) : arc_compactor_(std::move(arc_compactor)), compact_store_(std::make_shared<S>(b, e, *arc_compactor_)) {} // Copy constructor. DefaultCompactor(const DefaultCompactor<C, U, S> &compactor) : arc_compactor_(std::make_shared<C>(*compactor.GetArcCompactor())), compact_store_(compactor.SharedCompactStore()) {} template <class OtherC> explicit DefaultCompactor(const DefaultCompactor<OtherC, U, S> &compactor) : arc_compactor_(std::make_shared<C>(*compactor.GetArcCompactor())), compact_store_(compactor.SharedCompactStore()) {} StateId Start() const { return compact_store_->Start(); } StateId NumStates() const { return compact_store_->NumStates(); } size_t NumArcs() const { return compact_store_->NumArcs(); } void SetState(StateId s, State *state) const { if (state->GetStateId() != s) state->Set(this, s); } static DefaultCompactor<C, U, S> *Read(std::istream &strm, const FstReadOptions &opts, const FstHeader &hdr) { std::shared_ptr<C> arc_compactor(C::Read(strm)); if (arc_compactor == nullptr) return nullptr; std::shared_ptr<S> compact_store(S::Read(strm, opts, hdr, *arc_compactor)); if (compact_store == nullptr) return nullptr; return new DefaultCompactor<C, U, S>(arc_compactor, compact_store); } bool Write(std::ostream &strm, const FstWriteOptions &opts) const { return arc_compactor_->Write(strm) && compact_store_->Write(strm, opts); } uint64 Properties() const { return arc_compactor_->Properties(); } bool IsCompatible(const Fst<Arc> &fst) const { return arc_compactor_->Compatible(fst); } bool Error() const { return compact_store_->Error(); } bool HasFixedOutdegree() const { return arc_compactor_->Size() != -1; } static const string &Type() { static const string *const type = [] { string type = "compact"; if (sizeof(U) != sizeof(uint32)) type += std::to_string(8 * sizeof(U)); type += "_"; type += C::Type(); if (CompactStore::Type() != "compact") { type += "_"; type += CompactStore::Type(); } return new string(type); }(); return *type; } const ArcCompactor *GetArcCompactor() const { return arc_compactor_.get(); } CompactStore *GetCompactStore() const { return compact_store_.get(); } std::shared_ptr<ArcCompactor> SharedArcCompactor() const { return arc_compactor_; } 
std::shared_ptr<CompactStore> SharedCompactStore() const { return compact_store_; } // TODO(allauzen): remove dependencies on this method and make private. Arc ComputeArc(StateId s, Unsigned i, uint32 f) const { return arc_compactor_->Expand(s, compact_store_->Compacts(i), f); } private: std::pair<Unsigned, Unsigned> CompactsRange(StateId s) const { std::pair<size_t, size_t> range; if (HasFixedOutdegree()) { range.first = s * arc_compactor_->Size(); range.second = arc_compactor_->Size(); } else { range.first = compact_store_->States(s); range.second = compact_store_->States(s + 1) - range.first; } return range; } private: std::shared_ptr<ArcCompactor> arc_compactor_; std::shared_ptr<CompactStore> compact_store_; }; // Default implementation of state attributes accessor class for // DefaultCompactor. Use of efficient specialization strongly encouraged. template <class C, class U, class S> class DefaultCompactState { public: using Arc = typename C::Arc; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; DefaultCompactState() = default; DefaultCompactState(const DefaultCompactor<C, U, S> *compactor, StateId s) : compactor_(compactor), s_(s), range_(compactor->CompactsRange(s)), has_final_( range_.second != 0 && compactor->ComputeArc(s, range_.first, kArcILabelValue).ilabel == kNoLabel) { if (has_final_) { ++range_.first; --range_.second; } } void Set(const DefaultCompactor<C, U, S> *compactor, StateId s) { compactor_ = compactor; s_ = s; range_ = compactor->CompactsRange(s); if (range_.second != 0 && compactor->ComputeArc(s, range_.first, kArcILabelValue).ilabel == kNoLabel) { has_final_ = true; ++range_.first; --range_.second; } else { has_final_ = false; } } StateId GetStateId() const { return s_; } Weight Final() const { if (!has_final_) return Weight::Zero(); return compactor_->ComputeArc(s_, range_.first - 1, kArcWeightValue).weight; } size_t NumArcs() const { return range_.second; } Arc GetArc(size_t i, uint32 f) const { return compactor_->ComputeArc(s_, range_.first + i, f); } private: const DefaultCompactor<C, U, S> *compactor_ = nullptr; // borrowed ref. StateId s_ = kNoStateId; std::pair<U, U> range_ = {0, 0}; bool has_final_ = false; }; // Specialization for DefaultCompactStore. template <class C, class U> class DefaultCompactState<C, U, DefaultCompactStore<typename C::Element, U>> { public: using Arc = typename C::Arc; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using CompactStore = DefaultCompactStore<typename C::Element, U>; DefaultCompactState() = default; DefaultCompactState( const DefaultCompactor<C, U, CompactStore> *compactor, StateId s) : arc_compactor_(compactor->GetArcCompactor()), s_(s) { Init(compactor); } void Set(const DefaultCompactor<C, U, CompactStore> *compactor, StateId s) { arc_compactor_ = compactor->GetArcCompactor(); s_ = s; has_final_ = false; Init(compactor); } StateId GetStateId() const { return s_; } Weight Final() const { if (!has_final_) return Weight::Zero(); return arc_compactor_->Expand(s_, *(compacts_ - 1), kArcWeightValue).weight; } size_t NumArcs() const { return num_arcs_; } Arc GetArc(size_t i, uint32 f) const { return arc_compactor_->Expand(s_, compacts_[i], f); } private: void Init(const DefaultCompactor<C, U, CompactStore> *compactor) { const auto *store = compactor->GetCompactStore(); U offset; if (!compactor->HasFixedOutdegree()) { // Variable out-degree compactor. offset = store->States(s_); num_arcs_ = store->States(s_ + 1) - offset; } else { // Fixed out-degree compactor. 
      offset = s_ * arc_compactor_->Size();
      num_arcs_ = arc_compactor_->Size();
    }
    if (num_arcs_ > 0) {
      compacts_ = &(store->Compacts(offset));
      if (arc_compactor_->Expand(s_, *compacts_, kArcILabelValue).ilabel ==
          kNoStateId) {
        ++compacts_;
        --num_arcs_;
        has_final_ = true;
      }
    }
  }

 private:
  const C *arc_compactor_ = nullptr;               // Borrowed reference.
  const typename C::Element *compacts_ = nullptr;  // Borrowed reference.
  StateId s_ = kNoStateId;
  U num_arcs_ = 0;
  bool has_final_ = false;
};

template <class Arc, class ArcCompactor, class Unsigned, class CompactStore,
          class CacheStore>
class CompactFst;

template <class F, class G>
void Cast(const F &, G *);

namespace internal {

// Implementation class for CompactFst, which contains parametrizable
// Fst data storage (DefaultCompactStore by default) and Fst cache.
template <class Arc, class C, class CacheStore = DefaultCacheStore<Arc>>
class CompactFstImpl
    : public CacheBaseImpl<typename CacheStore::State, CacheStore> {
 public:
  using Weight = typename Arc::Weight;
  using StateId = typename Arc::StateId;
  using Compactor = C;

  using FstImpl<Arc>::SetType;
  using FstImpl<Arc>::SetProperties;
  using FstImpl<Arc>::Properties;
  using FstImpl<Arc>::SetInputSymbols;
  using FstImpl<Arc>::SetOutputSymbols;
  using FstImpl<Arc>::WriteHeader;

  using ImplBase = CacheBaseImpl<typename CacheStore::State, CacheStore>;
  using ImplBase::PushArc;
  using ImplBase::HasArcs;
  using ImplBase::HasFinal;
  using ImplBase::HasStart;
  using ImplBase::SetArcs;
  using ImplBase::SetFinal;
  using ImplBase::SetStart;

  CompactFstImpl()
      : ImplBase(CompactFstOptions()),
        compactor_() {
    SetType(Compactor::Type());
    SetProperties(kNullProperties | kStaticProperties);
  }

  CompactFstImpl(const Fst<Arc> &fst, std::shared_ptr<Compactor> compactor,
                 const CompactFstOptions &opts)
      : ImplBase(opts),
        compactor_(std::make_shared<Compactor>(fst, compactor)) {
    SetType(Compactor::Type());
    SetInputSymbols(fst.InputSymbols());
    SetOutputSymbols(fst.OutputSymbols());
    if (compactor_->Error()) SetProperties(kError, kError);
    uint64 copy_properties =
        fst.Properties(kMutable, false)
            ? fst.Properties(kCopyProperties, true)
            : CheckProperties(
                  fst, kCopyProperties & ~kWeightedCycles & ~kUnweightedCycles,
                  kCopyProperties);
    if ((copy_properties & kError) || !compactor_->IsCompatible(fst)) {
      FSTERROR() << "CompactFstImpl: Input Fst incompatible with compactor";
      SetProperties(kError, kError);
      return;
    }
    SetProperties(copy_properties | kStaticProperties);
  }

  CompactFstImpl(std::shared_ptr<Compactor> compactor,
                 const CompactFstOptions &opts)
      : ImplBase(opts),
        compactor_(compactor) {
    SetType(Compactor::Type());
    SetProperties(kStaticProperties | compactor_->Properties());
    if (compactor_->Error()) SetProperties(kError, kError);
  }

  CompactFstImpl(const CompactFstImpl<Arc, Compactor, CacheStore> &impl)
      : ImplBase(impl),
        compactor_(impl.compactor_ == nullptr
                       ? std::make_shared<Compactor>()
                       : std::make_shared<Compactor>(*impl.compactor_)) {
    SetType(impl.Type());
    SetProperties(impl.Properties());
    SetInputSymbols(impl.InputSymbols());
    SetOutputSymbols(impl.OutputSymbols());
  }

  // Allows changing the cache store from OtherCacheStore to CacheStore.
  template <class OtherCacheStore>
  CompactFstImpl(const CompactFstImpl<Arc, Compactor, OtherCacheStore> &impl)
      : ImplBase(CacheOptions(impl.GetCacheGc(), impl.GetCacheLimit())),
        compactor_(impl.compactor_ == nullptr ?
std::make_shared<Compactor>() : std::make_shared<Compactor>(*impl.compactor_)) { SetType(impl.Type()); SetProperties(impl.Properties()); SetInputSymbols(impl.InputSymbols()); SetOutputSymbols(impl.OutputSymbols()); } StateId Start() { if (!HasStart()) SetStart(compactor_->Start()); return ImplBase::Start(); } Weight Final(StateId s) { if (HasFinal(s)) return ImplBase::Final(s); compactor_->SetState(s, &state_); return state_.Final(); } StateId NumStates() const { if (Properties(kError)) return 0; return compactor_->NumStates(); } size_t NumArcs(StateId s) { if (HasArcs(s)) return ImplBase::NumArcs(s); compactor_->SetState(s, &state_); return state_.NumArcs(); } size_t NumInputEpsilons(StateId s) { if (!HasArcs(s) && !Properties(kILabelSorted)) Expand(s); if (HasArcs(s)) return ImplBase::NumInputEpsilons(s); return CountEpsilons(s, false); } size_t NumOutputEpsilons(StateId s) { if (!HasArcs(s) && !Properties(kOLabelSorted)) Expand(s); if (HasArcs(s)) return ImplBase::NumOutputEpsilons(s); return CountEpsilons(s, true); } size_t CountEpsilons(StateId s, bool output_epsilons) { compactor_->SetState(s, &state_); const uint32 f = output_epsilons ? kArcOLabelValue : kArcILabelValue; size_t num_eps = 0; for (size_t i = 0; i < state_.NumArcs(); ++i) { const auto& arc = state_.GetArc(i, f); const auto label = output_epsilons ? arc.olabel : arc.ilabel; if (label == 0) ++num_eps; else if (label > 0) break; } return num_eps; } static CompactFstImpl<Arc, Compactor, CacheStore> *Read( std::istream &strm, const FstReadOptions &opts) { std::unique_ptr<CompactFstImpl<Arc, Compactor, CacheStore>> impl( new CompactFstImpl<Arc, Compactor, CacheStore>()); FstHeader hdr; if (!impl->ReadHeader(strm, opts, kMinFileVersion, &hdr)) { return nullptr; } // Ensures compatibility. if (hdr.Version() == kAlignedFileVersion) { hdr.SetFlags(hdr.GetFlags() | FstHeader::IS_ALIGNED); } impl->compactor_ = std::shared_ptr<Compactor>( Compactor::Read(strm, opts, hdr)); if (!impl->compactor_) { return nullptr; } return impl.release(); } bool Write(std::ostream &strm, const FstWriteOptions &opts) const { FstHeader hdr; hdr.SetStart(compactor_->Start()); hdr.SetNumStates(compactor_->NumStates()); hdr.SetNumArcs(compactor_->NumArcs()); // Ensures compatibility. const auto file_version = opts.align ? kAlignedFileVersion : kFileVersion; WriteHeader(strm, opts, file_version, &hdr); return compactor_->Write(strm, opts); } // Provides information needed for generic state iterator. void InitStateIterator(StateIteratorData<Arc> *data) const { data->base = nullptr; data->nstates = compactor_->NumStates(); } void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) { if (!HasArcs(s)) Expand(s); ImplBase::InitArcIterator(s, data); } void Expand(StateId s) { compactor_->SetState(s, &state_); for (size_t i = 0; i < state_.NumArcs(); ++i) PushArc(s, state_.GetArc(i, kArcValueFlags)); SetArcs(s); if (!HasFinal(s)) SetFinal(s, state_.Final()); } const Compactor *GetCompactor() const { return compactor_.get(); } std::shared_ptr<Compactor> SharedCompactor() const { return compactor_; } void SetCompactor(std::shared_ptr<Compactor> compactor) { // TODO(allauzen): is this correct? is this needed? // TODO(allauzen): consider removing and forcing this through direct calls // to compactor. compactor_ = compactor; } // Properties always true of this FST class. 
static constexpr uint64 kStaticProperties = kExpanded; protected: template <class OtherArc, class OtherCompactor, class OtherCacheStore> explicit CompactFstImpl( const CompactFstImpl<OtherArc, OtherCompactor, OtherCacheStore> &impl) : compactor_(std::make_shared<Compactor>(*impl.GetCompactor())) { SetType(impl.Type()); SetProperties(impl.Properties()); SetInputSymbols(impl.InputSymbols()); SetOutputSymbols(impl.OutputSymbols()); } private: // Allows access during write. template <class AnyArc, class ArcCompactor, class Unsigned, class CompactStore, class AnyCacheStore> friend class ::fst::CompactFst; // allow access during write. // Current unaligned file format version. static constexpr int kFileVersion = 2; // Current aligned file format version. static constexpr int kAlignedFileVersion = 1; // Minimum file format version supported. static constexpr int kMinFileVersion = 1; std::shared_ptr<Compactor> compactor_; typename Compactor::State state_; }; template <class Arc, class Compactor, class CacheStore> constexpr uint64 CompactFstImpl<Arc, Compactor, CacheStore>::kStaticProperties; template <class Arc, class Compactor, class CacheStore> constexpr int CompactFstImpl<Arc, Compactor, CacheStore>::kFileVersion; template <class Arc, class Compactor, class CacheStore> constexpr int CompactFstImpl<Arc, Compactor, CacheStore>::kAlignedFileVersion; template <class Arc, class Compactor, class CacheStore> constexpr int CompactFstImpl<Arc, Compactor, CacheStore>::kMinFileVersion; } // namespace internal // This class attaches interface to implementation and handles reference // counting, delegating most methods to ImplToExpandedFst. The Unsigned type // is used to represent indices into the compact arc array. (Template // argument defaults are declared in fst-decl.h.) template <class A, class ArcCompactor, class Unsigned, class CompactStore, class CacheStore> class CompactFst : public ImplToExpandedFst<internal::CompactFstImpl< A, DefaultCompactor<ArcCompactor, Unsigned, CompactStore>, CacheStore>> { public: template <class F, class G> void friend Cast(const F &, G *); using Arc = A; using StateId = typename A::StateId; using Compactor = DefaultCompactor<ArcCompactor, Unsigned, CompactStore>; using Impl = internal::CompactFstImpl<A, Compactor, CacheStore>; using Store = CacheStore; // for CacheArcIterator friend class StateIterator< CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>>; friend class ArcIterator< CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>>; CompactFst() : ImplToExpandedFst<Impl>(std::make_shared<Impl>()) {} // If data is not nullptr, it is assumed to be already initialized. explicit CompactFst( const Fst<A> &fst, const ArcCompactor &compactor = ArcCompactor(), const CompactFstOptions &opts = CompactFstOptions(), std::shared_ptr<CompactStore> data = std::shared_ptr<CompactStore>()) : ImplToExpandedFst<Impl>( std::make_shared<Impl>( fst, std::make_shared<Compactor>( std::make_shared<ArcCompactor>(compactor), data), opts)) {} // If data is not nullptr, it is assumed to be already initialized. 
  CompactFst(
      const Fst<Arc> &fst, std::shared_ptr<ArcCompactor> compactor,
      const CompactFstOptions &opts = CompactFstOptions(),
      std::shared_ptr<CompactStore> data = std::shared_ptr<CompactStore>())
      : ImplToExpandedFst<Impl>(std::make_shared<Impl>(
            fst, std::make_shared<Compactor>(compactor, data), opts)) {}

  // The following two constructors take as input two iterators delimiting a
  // set of (already) compacted transitions, starting with the transitions out
  // of the initial state. The format of the input differs for fixed
  // out-degree and variable out-degree compactors.
  //
  // - For fixed out-degree compactors, the final weight (encoded as a
  // compacted transition) needs to be given only for final states. All strings
  // (compactor of size 1) will be assumed to be terminated by a final state
  // even when the final state is not implicitly given.
  //
  // - For variable out-degree compactors, the final weight (encoded as a
  // compacted transition) needs to be given for all states and must appear
  // first in the list (for state s, the final weight of s, followed by the
  // outgoing transitions in s).
  //
  // These two constructors allow the direct construction of a CompactFst
  // without first creating a more memory-hungry regular FST. This is useful
  // when memory usage is severely constrained.
  template <class Iterator>
  explicit CompactFst(const Iterator &begin, const Iterator &end,
                      const ArcCompactor &compactor = ArcCompactor(),
                      const CompactFstOptions &opts = CompactFstOptions())
      : ImplToExpandedFst<Impl>(std::make_shared<Impl>(
            std::make_shared<Compactor>(
                begin, end, std::make_shared<ArcCompactor>(compactor)),
            opts)) {}

  template <class Iterator>
  CompactFst(const Iterator &begin, const Iterator &end,
             std::shared_ptr<ArcCompactor> compactor,
             const CompactFstOptions &opts = CompactFstOptions())
      : ImplToExpandedFst<Impl>(std::make_shared<Impl>(
            std::make_shared<Compactor>(begin, end, compactor), opts)) {}

  // See Fst<>::Copy() for doc.
  CompactFst(
      const CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>
          &fst,
      bool safe = false)
      : ImplToExpandedFst<Impl>(fst, safe) {}

  // Get a copy of this CompactFst. See Fst<>::Copy() for further doc.
  CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore> *Copy(
      bool safe = false) const override {
    return new CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>(
        *this, safe);
  }

  // Reads a CompactFst from an input stream; returns nullptr on error.
  static CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore> *Read(
      std::istream &strm, const FstReadOptions &opts) {
    auto *impl = Impl::Read(strm, opts);
    return impl ? new CompactFst<A, ArcCompactor, Unsigned, CompactStore,
                                 CacheStore>(std::shared_ptr<Impl>(impl))
                : nullptr;
  }

  // Reads a CompactFst from a file; returns nullptr on error.
  // An empty filename reads from standard input.
  static CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore> *Read(
      const string &filename) {
    auto *impl = ImplToExpandedFst<Impl>::Read(filename);
    return impl ?
new CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>(std::shared_ptr<Impl>(impl)) : nullptr; } bool Write(std::ostream &strm, const FstWriteOptions &opts) const override { return GetImpl()->Write(strm, opts); } bool Write(const string &filename) const override { return Fst<Arc>::WriteFile(filename); } template <class FST> static bool WriteFst(const FST &fst, const ArcCompactor &compactor, std::ostream &strm, const FstWriteOptions &opts); void InitStateIterator(StateIteratorData<Arc> *data) const override { GetImpl()->InitStateIterator(data); } void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override { GetMutableImpl()->InitArcIterator(s, data); } MatcherBase<Arc> *InitMatcher(MatchType match_type) const override { return new SortedMatcher< CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>>( *this, match_type); } template <class Iterator> void SetCompactElements(const Iterator &b, const Iterator &e) { GetMutableImpl()->SetCompactor(std::make_shared<Compactor>( b, e, std::make_shared<ArcCompactor>())); } private: using ImplToFst<Impl, ExpandedFst<Arc>>::GetImpl; using ImplToFst<Impl, ExpandedFst<Arc>>::GetMutableImpl; explicit CompactFst(std::shared_ptr<Impl> impl) : ImplToExpandedFst<Impl>(impl) {} // Use overloading to extract the type of the argument. static Impl *GetImplIfCompactFst( const CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore> &compact_fst) { return compact_fst.GetImpl(); } // This does not give privileged treatment to subclasses of CompactFst. template <typename NonCompactFst> static Impl *GetImplIfCompactFst(const NonCompactFst &fst) { return nullptr; } CompactFst &operator=(const CompactFst &fst) = delete; }; // Writes FST in Compact format, with a possible pass over the machine before // writing to compute the number of states and arcs. template <class A, class ArcCompactor, class Unsigned, class CompactStore, class CacheStore> template <class FST> bool CompactFst<A, ArcCompactor, Unsigned, CompactStore, CacheStore>::WriteFst( const FST &fst, const ArcCompactor &compactor, std::ostream &strm, const FstWriteOptions &opts) { using Arc = A; using Weight = typename A::Weight; using Element = typename ArcCompactor::Element; const auto file_version = opts.align ? Impl::kAlignedFileVersion : Impl::kFileVersion; size_t num_arcs = -1; size_t num_states = -1; auto first_pass_compactor = compactor; if (auto *impl = GetImplIfCompactFst(fst)) { num_arcs = impl->GetCompactor()->GetCompactStore()->NumArcs(); num_states = impl->GetCompactor()->GetCompactStore()->NumStates(); first_pass_compactor = *impl->GetCompactor()->GetArcCompactor(); } else { // A first pass is needed to compute the state of the compactor, which // is saved ahead of the rest of the data structures. This unfortunately // means forcing a complete double compaction when writing in this format. // TODO(allauzen): eliminate mutable state from compactors. 
    num_arcs = 0;
    num_states = 0;
    for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
      const auto s = siter.Value();
      ++num_states;
      if (fst.Final(s) != Weight::Zero()) {
        first_pass_compactor.Compact(
            s, Arc(kNoLabel, kNoLabel, fst.Final(s), kNoStateId));
      }
      for (ArcIterator<FST> aiter(fst, s); !aiter.Done(); aiter.Next()) {
        ++num_arcs;
        first_pass_compactor.Compact(s, aiter.Value());
      }
    }
  }
  FstHeader hdr;
  hdr.SetStart(fst.Start());
  hdr.SetNumStates(num_states);
  hdr.SetNumArcs(num_arcs);
  string type = "compact";
  if (sizeof(Unsigned) != sizeof(uint32)) {
    type += std::to_string(CHAR_BIT * sizeof(Unsigned));
  }
  type += "_";
  type += ArcCompactor::Type();
  if (CompactStore::Type() != "compact") {
    type += "_";
    type += CompactStore::Type();
  }
  const auto copy_properties = fst.Properties(kCopyProperties, true);
  if ((copy_properties & kError) || !compactor.Compatible(fst)) {
    FSTERROR() << "Fst incompatible with compactor";
    return false;
  }
  uint64 properties = copy_properties | Impl::kStaticProperties;
  internal::FstImpl<Arc>::WriteFstHeader(fst, strm, opts, file_version, type,
                                         properties, &hdr);
  first_pass_compactor.Write(strm);
  if (first_pass_compactor.Size() == -1) {
    if (opts.align && !AlignOutput(strm)) {
      LOG(ERROR) << "CompactFst::Write: Alignment failed: " << opts.source;
      return false;
    }
    Unsigned compacts = 0;
    for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
      const auto s = siter.Value();
      strm.write(reinterpret_cast<const char *>(&compacts), sizeof(compacts));
      if (fst.Final(s) != Weight::Zero()) {
        ++compacts;
      }
      compacts += fst.NumArcs(s);
    }
    strm.write(reinterpret_cast<const char *>(&compacts), sizeof(compacts));
  }
  if (opts.align && !AlignOutput(strm)) {
    LOG(ERROR) << "Could not align file during write after writing states";
  }
  const auto &second_pass_compactor = compactor;
  Element element;
  for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
    const auto s = siter.Value();
    if (fst.Final(s) != Weight::Zero()) {
      element = second_pass_compactor.Compact(
          s, A(kNoLabel, kNoLabel, fst.Final(s), kNoStateId));
      strm.write(reinterpret_cast<const char *>(&element), sizeof(element));
    }
    for (ArcIterator<FST> aiter(fst, s); !aiter.Done(); aiter.Next()) {
      element = second_pass_compactor.Compact(s, aiter.Value());
      strm.write(reinterpret_cast<const char *>(&element), sizeof(element));
    }
  }
  strm.flush();
  if (!strm) {
    LOG(ERROR) << "CompactFst write failed: " << opts.source;
    return false;
  }
  return true;
}

// Specialization for CompactFst; see generic version in fst.h for sample
// usage (but use the CompactFst type!). This version should inline.
template <class Arc, class ArcCompactor, class Unsigned, class CompactStore,
          class CacheStore>
class StateIterator<
    CompactFst<Arc, ArcCompactor, Unsigned, CompactStore, CacheStore>> {
 public:
  using StateId = typename Arc::StateId;

  explicit StateIterator(
      const CompactFst<Arc, ArcCompactor, Unsigned, CompactStore, CacheStore>
          &fst)
      : nstates_(fst.GetImpl()->NumStates()), s_(0) {}

  bool Done() const { return s_ >= nstates_; }

  StateId Value() const { return s_; }

  void Next() { ++s_; }

  void Reset() { s_ = 0; }

 private:
  StateId nstates_;
  StateId s_;
};

// Specialization for CompactFst. Never caches,
// always iterates over the underlying compact elements.
template <class Arc, class ArcCompactor, class Unsigned, class CompactStore,
          class CacheStore>
class ArcIterator<
    CompactFst<Arc, ArcCompactor, Unsigned, CompactStore, CacheStore>> {
 public:
  using StateId = typename Arc::StateId;
  using Element = typename ArcCompactor::Element;
  using Compactor = DefaultCompactor<ArcCompactor, Unsigned, CompactStore>;
  using State = typename Compactor::State;

  ArcIterator(const CompactFst<Arc, ArcCompactor, Unsigned, CompactStore,
                               CacheStore> &fst,
              StateId s)
      : state_(fst.GetImpl()->GetCompactor(), s),
        pos_(0),
        flags_(kArcValueFlags) {}

  bool Done() const { return pos_ >= state_.NumArcs(); }

  const Arc &Value() const {
    arc_ = state_.GetArc(pos_, flags_);
    return arc_;
  }

  void Next() { ++pos_; }

  size_t Position() const { return pos_; }

  void Reset() { pos_ = 0; }

  void Seek(size_t pos) { pos_ = pos; }

  uint32 Flags() const { return flags_; }

  void SetFlags(uint32 f, uint32 m) {
    flags_ &= ~m;
    flags_ |= (f & kArcValueFlags);
  }

 private:
  State state_;
  size_t pos_;
  mutable Arc arc_;
  uint32 flags_;
};

// ArcCompactor for unweighted string FSTs.
template <class A>
class StringCompactor {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using Element = Label;

  Element Compact(StateId s, const Arc &arc) const { return arc.ilabel; }

  Arc Expand(StateId s, const Element &p, uint32 f = kArcValueFlags) const {
    return Arc(p, p, Weight::One(), p != kNoLabel ? s + 1 : kNoStateId);
  }

  constexpr ssize_t Size() const { return 1; }

  constexpr uint64 Properties() const {
    return kString | kAcceptor | kUnweighted;
  }

  bool Compatible(const Fst<Arc> &fst) const {
    const auto props = Properties();
    return fst.Properties(props, true) == props;
  }

  static const string &Type() {
    static const string *const type = new string("string");
    return *type;
  }

  bool Write(std::ostream &strm) const { return true; }

  static StringCompactor *Read(std::istream &strm) {
    return new StringCompactor;
  }
};

// ArcCompactor for weighted string FSTs.
template <class A>
class WeightedStringCompactor {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using Element = std::pair<Label, Weight>;

  Element Compact(StateId s, const Arc &arc) const {
    return std::make_pair(arc.ilabel, arc.weight);
  }

  Arc Expand(StateId s, const Element &p, uint32 f = kArcValueFlags) const {
    return Arc(p.first, p.first, p.second,
               p.first != kNoLabel ? s + 1 : kNoStateId);
  }

  constexpr ssize_t Size() const { return 1; }

  constexpr uint64 Properties() const { return kString | kAcceptor; }

  bool Compatible(const Fst<Arc> &fst) const {
    const auto props = Properties();
    return fst.Properties(props, true) == props;
  }

  static const string &Type() {
    static const string *const type = new string("weighted_string");
    return *type;
  }

  bool Write(std::ostream &strm) const { return true; }

  static WeightedStringCompactor *Read(std::istream &strm) {
    return new WeightedStringCompactor;
  }
};

// ArcCompactor for unweighted acceptor FSTs.
template <class A>
class UnweightedAcceptorCompactor {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using Element = std::pair<Label, StateId>;

  Element Compact(StateId s, const Arc &arc) const {
    return std::make_pair(arc.ilabel, arc.nextstate);
  }

  Arc Expand(StateId s, const Element &p, uint32 f = kArcValueFlags) const {
    return Arc(p.first, p.first, Weight::One(), p.second);
  }

  constexpr ssize_t Size() const { return -1; }

  constexpr uint64 Properties() const { return kAcceptor | kUnweighted; }

  bool Compatible(const Fst<Arc> &fst) const {
    const auto props = Properties();
    return fst.Properties(props, true) == props;
  }

  static const string &Type() {
    static const string *const type = new string("unweighted_acceptor");
    return *type;
  }

  bool Write(std::ostream &strm) const { return true; }

  static UnweightedAcceptorCompactor *Read(std::istream &istrm) {
    return new UnweightedAcceptorCompactor;
  }
};

// ArcCompactor for weighted acceptor FSTs.
template <class A>
class AcceptorCompactor {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using Element = std::pair<std::pair<Label, Weight>, StateId>;

  Element Compact(StateId s, const Arc &arc) const {
    return std::make_pair(std::make_pair(arc.ilabel, arc.weight),
                          arc.nextstate);
  }

  Arc Expand(StateId s, const Element &p, uint32 f = kArcValueFlags) const {
    return Arc(p.first.first, p.first.first, p.first.second, p.second);
  }

  constexpr ssize_t Size() const { return -1; }

  constexpr uint64 Properties() const { return kAcceptor; }

  bool Compatible(const Fst<Arc> &fst) const {
    const auto props = Properties();
    return fst.Properties(props, true) == props;
  }

  static const string &Type() {
    static const string *const type = new string("acceptor");
    return *type;
  }

  bool Write(std::ostream &strm) const { return true; }

  static AcceptorCompactor *Read(std::istream &strm) {
    return new AcceptorCompactor;
  }
};

// ArcCompactor for unweighted FSTs.
template <class A>
class UnweightedCompactor {
 public:
  using Arc = A;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  using Element = std::pair<std::pair<Label, Label>, StateId>;

  Element Compact(StateId s, const Arc &arc) const {
    return std::make_pair(std::make_pair(arc.ilabel, arc.olabel),
                          arc.nextstate);
  }

  Arc Expand(StateId s, const Element &p, uint32 f = kArcValueFlags) const {
    return Arc(p.first.first, p.first.second, Weight::One(), p.second);
  }

  constexpr ssize_t Size() const { return -1; }

  constexpr uint64 Properties() const { return kUnweighted; }

  bool Compatible(const Fst<Arc> &fst) const {
    const auto props = Properties();
    return fst.Properties(props, true) == props;
  }

  static const string &Type() {
    static const string *const type = new string("unweighted");
    return *type;
  }

  bool Write(std::ostream &strm) const { return true; }

  static UnweightedCompactor *Read(std::istream &strm) {
    return new UnweightedCompactor;
  }
};

template <class Arc, class Unsigned /* = uint32 */>
using CompactStringFst = CompactFst<Arc, StringCompactor<Arc>, Unsigned>;

template <class Arc, class Unsigned /* = uint32 */>
using CompactWeightedStringFst =
    CompactFst<Arc, WeightedStringCompactor<Arc>, Unsigned>;

template <class Arc, class Unsigned /* = uint32 */>
using CompactAcceptorFst = CompactFst<Arc, AcceptorCompactor<Arc>, Unsigned>;

template <class Arc, class Unsigned /* = uint32 */>
using CompactUnweightedFst =
    CompactFst<Arc, UnweightedCompactor<Arc>, Unsigned>;

template <class Arc, class Unsigned /* = uint32 */>
using CompactUnweightedAcceptorFst =
    CompactFst<Arc, UnweightedAcceptorCompactor<Arc>, Unsigned>;

using StdCompactStringFst = CompactStringFst<StdArc, uint32>;

using StdCompactWeightedStringFst =
    CompactWeightedStringFst<StdArc, uint32>;

using StdCompactAcceptorFst = CompactAcceptorFst<StdArc, uint32>;

using StdCompactUnweightedFst = CompactUnweightedFst<StdArc, uint32>;

using StdCompactUnweightedAcceptorFst =
    CompactUnweightedAcceptorFst<StdArc, uint32>;

}  // namespace fst

#endif  // FST_COMPACT_FST_H_
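// Editor's usage sketch (not part of the original header): a minimal,
// self-contained program showing how the compact type aliases above are
// typically used, assuming OpenFst is installed and reachable via
// <fst/fstlib.h>. It builds a small unweighted acceptor as a StdVectorFst,
// converts it with the CompactFst converting constructor (which runs the
// ArcCompactor over every arc and final weight), and round-trips it through
// Write()/Read(). The file name "example.fst" and all local variable names
// are hypothetical.

#include <iostream>
#include <memory>

#include <fst/fstlib.h>

int main() {
  // Two-arc unweighted acceptor: 0 -(1)-> 1 -(2)-> 2, with state 2 final.
  fst::StdVectorFst mutable_fst;
  mutable_fst.AddState();  // state 0
  mutable_fst.AddState();  // state 1
  mutable_fst.AddState();  // state 2
  mutable_fst.SetStart(0);
  mutable_fst.AddArc(0, fst::StdArc(1, 1, fst::StdArc::Weight::One(), 1));
  mutable_fst.AddArc(1, fst::StdArc(2, 2, fst::StdArc::Weight::One(), 2));
  mutable_fst.SetFinal(2, fst::StdArc::Weight::One());

  // The FST is an unweighted acceptor, so UnweightedAcceptorCompactor's
  // Compatible() check passes and each arc compacts to a (label, nextstate)
  // pair.
  fst::StdCompactUnweightedAcceptorFst compact_fst(mutable_fst);

  // Round-trip through the binary format produced by WriteFst().
  if (!compact_fst.Write("example.fst")) return 1;
  std::unique_ptr<fst::StdCompactUnweightedAcceptorFst> loaded(
      fst::StdCompactUnweightedAcceptorFst::Read("example.fst"));
  if (!loaded) return 1;
  std::cout << "states: " << loaded->NumStates() << std::endl;
  return 0;
}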
0
coqui_public_repos/STT-models/irish/itml
coqui_public_repos/STT-models/irish/itml/v0.1.0/LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. 
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
0
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstdisambiguate-main.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Disambiguates an FST.

#include <cstring>

#include <memory>
#include <string>

#include <fst/flags.h>
#include <fst/script/disambiguate.h>

DECLARE_double(delta);
DECLARE_int64(nstate);
DECLARE_string(weight);
DECLARE_int64(subsequential_label);

int fstdisambiguate_main(int argc, char **argv) {
  namespace s = fst::script;
  using fst::script::FstClass;
  using fst::script::VectorFstClass;
  using fst::script::WeightClass;

  string usage = "Disambiguates an FST.\n\n  Usage: ";
  usage += argv[0];
  usage += " [in.fst [out.fst]]\n";

  std::set_new_handler(FailedNewHandler);
  SET_FLAGS(usage.c_str(), &argc, &argv, true);
  if (argc > 3) {
    ShowUsage();
    return 1;
  }

  const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : "";
  const string out_name = argc > 2 ? argv[2] : "";

  std::unique_ptr<FstClass> ifst(FstClass::Read(in_name));
  if (!ifst) return 1;

  VectorFstClass ofst(ifst->ArcType());

  const auto weight_threshold =
      FLAGS_weight.empty() ? WeightClass::Zero(ifst->WeightType())
                           : WeightClass(ifst->WeightType(), FLAGS_weight);

  const s::DisambiguateOptions opts(FLAGS_delta, weight_threshold,
                                    FLAGS_nstate, FLAGS_subsequential_label);

  s::Disambiguate(*ifst, &ofst, opts);

  return !ofst.Write(out_name);
}
0
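The binary above is a thin command-line wrapper; the same operation is available programmatically. Below is a minimal sketch, not part of the repository, that calls the templated Disambiguate() from <fst/disambiguate.h> with default options; the file names are placeholders.

#include <memory>

#include <fst/fstlib.h>
#include <fst/disambiguate.h>

int main() {
  // Read a (possibly ambiguous) FST from disk; "in.fst" is a placeholder.
  std::unique_ptr<fst::StdVectorFst> in(fst::StdVectorFst::Read("in.fst"));
  if (!in) return 1;
  fst::StdVectorFst out;
  // Produce an equivalent FST in which no two successful paths share
  // the same input string.
  fst::Disambiguate(*in, &out);
  return !out.Write("out.fst");
}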
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/script/project.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_SCRIPT_PROJECT_H_
#define FST_SCRIPT_PROJECT_H_

#include <utility>

#include <fst/project.h>
#include <fst/script/fst-class.h>

namespace fst {
namespace script {

using ProjectArgs = std::pair<MutableFstClass *, ProjectType>;

template <class Arc>
void Project(ProjectArgs *args) {
  MutableFst<Arc> *fst = std::get<0>(*args)->GetMutableFst<Arc>();
  Project(fst, std::get<1>(*args));
}

void Project(MutableFstClass *fst, ProjectType project_type);

}  // namespace script
}  // namespace fst

#endif  // FST_SCRIPT_PROJECT_H_
0
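The script-level wrapper above forwards to the templated Project() declared in <fst/project.h>, which replaces each arc's label pair with either its input or output side. A short illustrative sketch, assuming the PROJECT_INPUT/PROJECT_OUTPUT enumerators of openfst 1.6.x (the function name here is hypothetical):

#include <fst/fstlib.h>

void MakeAcceptorFromInputSide(fst::StdMutableFst *fst) {
  // After this call every arc has ilabel == olabel (the former ilabel),
  // turning the transducer into an acceptor over its input alphabet.
  fst::Project(fst, fst::PROJECT_INPUT);
}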
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/utils/managers.py
import json
import random
from typing import Any, Dict, List, Tuple, Union

import fsspec
import numpy as np
import torch

from TTS.config import load_config
from TTS.encoder.utils.generic_utils import setup_encoder_model
from TTS.utils.audio import AudioProcessor


def load_file(path: str):
    if path.endswith(".json"):
        with fsspec.open(path, "r") as f:
            return json.load(f)
    elif path.endswith(".pth"):
        with fsspec.open(path, "rb") as f:
            return torch.load(f, map_location="cpu")
    else:
        raise ValueError("Unsupported file type")


def save_file(obj: Any, path: str):
    if path.endswith(".json"):
        with fsspec.open(path, "w") as f:
            json.dump(obj, f, indent=4)
    elif path.endswith(".pth"):
        with fsspec.open(path, "wb") as f:
            torch.save(obj, f)
    else:
        raise ValueError("Unsupported file type")


class BaseIDManager:
    """Base `ID` Manager class. Every new `ID` manager must inherit this.
    It defines common `ID` manager specific functions.
    """

    def __init__(self, id_file_path: str = ""):
        self.name_to_id = {}

        if id_file_path:
            self.load_ids_from_file(id_file_path)

    @staticmethod
    def _load_json(json_file_path: str) -> Dict:
        with fsspec.open(json_file_path, "r") as f:
            return json.load(f)

    @staticmethod
    def _save_json(json_file_path: str, data: dict) -> None:
        with fsspec.open(json_file_path, "w") as f:
            json.dump(data, f, indent=4)

    def set_ids_from_data(self, items: List, parse_key: str) -> None:
        """Set IDs from data samples.

        Args:
            items (List): Data samples returned by `load_tts_samples()`.
        """
        self.name_to_id = self.parse_ids_from_data(items, parse_key=parse_key)

    def load_ids_from_file(self, file_path: str) -> None:
        """Set IDs from a file.

        Args:
            file_path (str): Path to the file.
        """
        self.name_to_id = load_file(file_path)

    def save_ids_to_file(self, file_path: str) -> None:
        """Save IDs to a json file.

        Args:
            file_path (str): Path to the output file.
        """
        save_file(self.name_to_id, file_path)

    def get_random_id(self) -> Any:
        """Get a random ID.

        Returns:
            Any: A randomly chosen ID, or None if no IDs are loaded.
        """
        if self.name_to_id:
            return self.name_to_id[random.choices(list(self.name_to_id.keys()))[0]]

        return None

    @staticmethod
    def parse_ids_from_data(items: List, parse_key: str) -> Tuple[Dict]:
        """Parse IDs from data samples returned by `load_tts_samples()`.

        Args:
            items (list): Data samples returned by `load_tts_samples()`.
            parse_key (str): The key used to parse the data.

        Returns:
            Tuple[Dict]: speaker IDs.
        """
        classes = sorted({item[parse_key] for item in items})
        ids = {name: i for i, name in enumerate(classes)}
        return ids


class EmbeddingManager(BaseIDManager):
    """Base `Embedding` Manager class. Every new `Embedding` manager must inherit this.
    It defines common `Embedding` manager specific functions.

    It expects embeddings files in the following format:

    ::

        {
            'audio_file_key': {
                'name': 'category_name',
                'embedding': [<embedding_values>]
            },
            ...
        }

    `audio_file_key` is a unique key to the audio file in the dataset. It can be the path to the file or any other unique key.
    `embedding` is the embedding vector of the audio file.
    `name` can be the name of the speaker of the audio file.
""" def __init__( self, embedding_file_path: Union[str, List[str]] = "", id_file_path: str = "", encoder_model_path: str = "", encoder_config_path: str = "", use_cuda: bool = False, ): super().__init__(id_file_path=id_file_path) self.embeddings = {} self.embeddings_by_names = {} self.clip_ids = [] self.encoder = None self.encoder_ap = None self.use_cuda = use_cuda if embedding_file_path: if isinstance(embedding_file_path, list): self.load_embeddings_from_list_of_files(embedding_file_path) else: self.load_embeddings_from_file(embedding_file_path) if encoder_model_path and encoder_config_path: self.init_encoder(encoder_model_path, encoder_config_path, use_cuda) @property def num_embeddings(self): """Get number of embeddings.""" return len(self.embeddings) @property def num_names(self): """Get number of embeddings.""" return len(self.embeddings_by_names) @property def embedding_dim(self): """Dimensionality of embeddings. If embeddings are not loaded, returns zero.""" if self.embeddings: return len(self.embeddings[list(self.embeddings.keys())[0]]["embedding"]) return 0 @property def embedding_names(self): """Get embedding names.""" return list(self.embeddings_by_names.keys()) def save_embeddings_to_file(self, file_path: str) -> None: """Save embeddings to a json file. Args: file_path (str): Path to the output file. """ save_file(self.embeddings, file_path) @staticmethod def read_embeddings_from_file(file_path: str): """Load embeddings from a json file. Args: file_path (str): Path to the file. """ embeddings = load_file(file_path) speakers = sorted({x["name"] for x in embeddings.values()}) name_to_id = {name: i for i, name in enumerate(speakers)} clip_ids = list(set(sorted(clip_name for clip_name in embeddings.keys()))) # cache embeddings_by_names for fast inference using a bigger speakers.json embeddings_by_names = {} for x in embeddings.values(): if x["name"] not in embeddings_by_names.keys(): embeddings_by_names[x["name"]] = [x["embedding"]] else: embeddings_by_names[x["name"]].append(x["embedding"]) return name_to_id, clip_ids, embeddings, embeddings_by_names def load_embeddings_from_file(self, file_path: str) -> None: """Load embeddings from a json file. Args: file_path (str): Path to the target json file. """ self.name_to_id, self.clip_ids, self.embeddings, self.embeddings_by_names = self.read_embeddings_from_file( file_path ) def load_embeddings_from_list_of_files(self, file_paths: List[str]) -> None: """Load embeddings from a list of json files and don't allow duplicate keys. Args: file_paths (List[str]): List of paths to the target json files. """ self.name_to_id = {} self.clip_ids = [] self.embeddings_by_names = {} self.embeddings = {} for file_path in file_paths: ids, clip_ids, embeddings, embeddings_by_names = self.read_embeddings_from_file(file_path) # check colliding keys duplicates = set(self.embeddings.keys()) & set(embeddings.keys()) if duplicates: raise ValueError(f" [!] Duplicate embedding names <{duplicates}> in {file_path}") # store values self.name_to_id.update(ids) self.clip_ids.extend(clip_ids) self.embeddings_by_names.update(embeddings_by_names) self.embeddings.update(embeddings) # reset name_to_id to get the right speaker ids self.name_to_id = {name: i for i, name in enumerate(self.name_to_id)} def get_embedding_by_clip(self, clip_idx: str) -> List: """Get embedding by clip ID. Args: clip_idx (str): Target clip ID. Returns: List: embedding as a list. 
""" return self.embeddings[clip_idx]["embedding"] def get_embeddings_by_name(self, idx: str) -> List[List]: """Get all embeddings of a speaker. Args: idx (str): Target name. Returns: List[List]: all the embeddings of the given speaker. """ return self.embeddings_by_names[idx] def get_embeddings_by_names(self) -> Dict: """Get all embeddings by names. Returns: Dict: all the embeddings of each speaker. """ embeddings_by_names = {} for x in self.embeddings.values(): if x["name"] not in embeddings_by_names.keys(): embeddings_by_names[x["name"]] = [x["embedding"]] else: embeddings_by_names[x["name"]].append(x["embedding"]) return embeddings_by_names def get_mean_embedding(self, idx: str, num_samples: int = None, randomize: bool = False) -> np.ndarray: """Get mean embedding of a idx. Args: idx (str): Target name. num_samples (int, optional): Number of samples to be averaged. Defaults to None. randomize (bool, optional): Pick random `num_samples` of embeddings. Defaults to False. Returns: np.ndarray: Mean embedding. """ embeddings = self.get_embeddings_by_name(idx) if num_samples is None: embeddings = np.stack(embeddings).mean(0) else: assert len(embeddings) >= num_samples, f" [!] {idx} has number of samples < {num_samples}" if randomize: embeddings = np.stack(random.choices(embeddings, k=num_samples)).mean(0) else: embeddings = np.stack(embeddings[:num_samples]).mean(0) return embeddings def get_random_embedding(self) -> Any: """Get a random embedding. Args: Returns: np.ndarray: embedding. """ if self.embeddings: return self.embeddings[random.choices(list(self.embeddings.keys()))[0]]["embedding"] return None def get_clips(self) -> List: return sorted(self.embeddings.keys()) def init_encoder(self, model_path: str, config_path: str, use_cuda=False) -> None: """Initialize a speaker encoder model. Args: model_path (str): Model file path. config_path (str): Model config file path. use_cuda (bool, optional): Use CUDA. Defaults to False. """ self.use_cuda = use_cuda self.encoder_config = load_config(config_path) self.encoder = setup_encoder_model(self.encoder_config) self.encoder_criterion = self.encoder.load_checkpoint( self.encoder_config, model_path, eval=True, use_cuda=use_cuda, cache=True ) self.encoder_ap = AudioProcessor(**self.encoder_config.audio) def compute_embedding_from_clip(self, wav_file: Union[str, List[str]]) -> list: """Compute a embedding from a given audio file. Args: wav_file (Union[str, List[str]]): Target file path. Returns: list: Computed embedding. """ def _compute(wav_file: str): waveform = self.encoder_ap.load_wav(wav_file, sr=self.encoder_ap.sample_rate) if not self.encoder_config.model_params.get("use_torch_spec", False): m_input = self.encoder_ap.melspectrogram(waveform) m_input = torch.from_numpy(m_input) else: m_input = torch.from_numpy(waveform) if self.use_cuda: m_input = m_input.cuda() m_input = m_input.unsqueeze(0) embedding = self.encoder.compute_embedding(m_input) return embedding if isinstance(wav_file, list): # compute the mean embedding embeddings = None for wf in wav_file: embedding = _compute(wf) if embeddings is None: embeddings = embedding else: embeddings += embedding return (embeddings / len(wav_file))[0].tolist() embedding = _compute(wav_file) return embedding[0].tolist() def compute_embeddings(self, feats: Union[torch.Tensor, np.ndarray]) -> List: """Compute embedding from features. Args: feats (Union[torch.Tensor, np.ndarray]): Input features. Returns: List: computed embedding. 
""" if isinstance(feats, np.ndarray): feats = torch.from_numpy(feats) if feats.ndim == 2: feats = feats.unsqueeze(0) if self.use_cuda: feats = feats.cuda() return self.encoder.compute_embedding(feats)
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/concat.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes to compute the concatenation of two FSTs.

#ifndef FST_CONCAT_H_
#define FST_CONCAT_H_

#include <algorithm>
#include <vector>

#include <fst/mutable-fst.h>
#include <fst/rational.h>

namespace fst {

// Computes the concatenation (product) of two FSTs. If FST1 transduces string
// x to y with weight a and FST2 transduces string w to v with weight b, then
// their concatenation transduces string xw to yv with weight Times(a, b).
//
// This version modifies its MutableFst argument (in first position).
//
// Complexity:
//
//   Time: O(V1 + V2 + E2)
//   Space: O(V1 + V2 + E2)
//
// where Vi is the number of states, and Ei is the number of arcs, of the ith
// FST.
template <class Arc>
void Concat(MutableFst<Arc> *fst1, const Fst<Arc> &fst2) {
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  // Checks that the symbol tables are compatible.
  if (!CompatSymbols(fst1->InputSymbols(), fst2.InputSymbols()) ||
      !CompatSymbols(fst1->OutputSymbols(), fst2.OutputSymbols())) {
    FSTERROR() << "Concat: Input/output symbol tables of 1st argument "
               << "do not match input/output symbol tables of 2nd argument";
    fst1->SetProperties(kError, kError);
    return;
  }
  const auto props1 = fst1->Properties(kFstProperties, false);
  const auto props2 = fst2.Properties(kFstProperties, false);
  const auto start1 = fst1->Start();
  if (start1 == kNoStateId) {
    if (props2 & kError) fst1->SetProperties(kError, kError);
    return;
  }
  const auto numstates1 = fst1->NumStates();
  if (fst2.Properties(kExpanded, false)) {
    fst1->ReserveStates(numstates1 + CountStates(fst2));
  }
  for (StateIterator<Fst<Arc>> siter2(fst2); !siter2.Done(); siter2.Next()) {
    const auto s1 = fst1->AddState();
    const auto s2 = siter2.Value();
    fst1->SetFinal(s1, fst2.Final(s2));
    fst1->ReserveArcs(s1, fst2.NumArcs(s2));
    for (ArcIterator<Fst<Arc>> aiter(fst2, s2); !aiter.Done(); aiter.Next()) {
      auto arc = aiter.Value();
      arc.nextstate += numstates1;
      fst1->AddArc(s1, arc);
    }
  }
  const auto start2 = fst2.Start();
  for (StateId s1 = 0; s1 < numstates1; ++s1) {
    const auto weight = fst1->Final(s1);
    if (weight != Weight::Zero()) {
      fst1->SetFinal(s1, Weight::Zero());
      if (start2 != kNoStateId) {
        fst1->AddArc(s1, Arc(0, 0, weight, start2 + numstates1));
      }
    }
  }
  if (start2 != kNoStateId) {
    fst1->SetProperties(ConcatProperties(props1, props2), kFstProperties);
  }
}

// Computes the concatenation of two FSTs. This version modifies its
// MutableFst argument (in second position).
//
// Complexity:
//
//   Time: O(V1 + E1)
//   Space: O(V1 + E1)
//
// where Vi is the number of states, and Ei is the number of arcs, of the ith
// FST.
template <class Arc>
void Concat(const Fst<Arc> &fst1, MutableFst<Arc> *fst2) {
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  // Checks that the symbol tables are compatible.
  if (!CompatSymbols(fst1.InputSymbols(), fst2->InputSymbols()) ||
      !CompatSymbols(fst1.OutputSymbols(), fst2->OutputSymbols())) {
    FSTERROR() << "Concat: Input/output symbol tables of 1st argument "
               << "do not match input/output symbol tables of 2nd argument";
    fst2->SetProperties(kError, kError);
    return;
  }
  const auto props1 = fst1.Properties(kFstProperties, false);
  const auto props2 = fst2->Properties(kFstProperties, false);
  const auto start2 = fst2->Start();
  if (start2 == kNoStateId) {
    if (props1 & kError) fst2->SetProperties(kError, kError);
    return;
  }
  const auto numstates2 = fst2->NumStates();
  if (fst1.Properties(kExpanded, false)) {
    fst2->ReserveStates(numstates2 + CountStates(fst1));
  }
  for (StateIterator<Fst<Arc>> siter(fst1); !siter.Done(); siter.Next()) {
    const auto s1 = siter.Value();
    const auto s2 = fst2->AddState();
    const auto weight = fst1.Final(s1);
    if (weight != Weight::Zero()) {
      fst2->ReserveArcs(s2, fst1.NumArcs(s1) + 1);
      fst2->AddArc(s2, Arc(0, 0, weight, start2));
    } else {
      fst2->ReserveArcs(s2, fst1.NumArcs(s1));
    }
    for (ArcIterator<Fst<Arc>> aiter(fst1, s1); !aiter.Done(); aiter.Next()) {
      auto arc = aiter.Value();
      arc.nextstate += numstates2;
      fst2->AddArc(s2, arc);
    }
  }
  const auto start1 = fst1.Start();
  if (start1 != kNoStateId) {
    fst2->SetStart(start1 + numstates2);
    fst2->SetProperties(ConcatProperties(props1, props2), kFstProperties);
  } else {
    fst2->SetStart(fst2->AddState());
  }
}

// Computes the concatenation of two FSTs. This version modifies its
// RationalFst input (in first position).
template <class Arc>
void Concat(RationalFst<Arc> *fst1, const Fst<Arc> &fst2) {
  fst1->GetMutableImpl()->AddConcat(fst2, true);
}

// Computes the concatenation of two FSTs. This version modifies its
// RationalFst input (in second position).
template <class Arc>
void Concat(const Fst<Arc> &fst1, RationalFst<Arc> *fst2) {
  fst2->GetMutableImpl()->AddConcat(fst1, false);
}

using ConcatFstOptions = RationalFstOptions;

// Computes the concatenation (product) of two FSTs; this version is a delayed
// FST. If FST1 transduces string x to y with weight a and FST2 transduces
// string w to v with weight b, then their concatenation transduces string xw
// to yv with Times(a, b).
//
// Complexity:
//
//   Time: O(v1 + e1 + v2 + e2),
//   Space: O(v1 + v2)
//
// where vi is the number of states visited, and ei is the number of arcs
// visited, of the ith FST. Constant time and space to visit an input state or
// arc is assumed and exclusive of caching.
template <class A>
class ConcatFst : public RationalFst<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  ConcatFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2) {
    GetMutableImpl()->InitConcat(fst1, fst2);
  }

  ConcatFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
            const ConcatFstOptions &opts)
      : RationalFst<Arc>(opts) {
    GetMutableImpl()->InitConcat(fst1, fst2);
  }

  // See Fst<>::Copy() for doc.
  ConcatFst(const ConcatFst<Arc> &fst, bool safe = false)
      : RationalFst<Arc>(fst, safe) {}

  // Get a copy of this ConcatFst. See Fst<>::Copy() for further doc.
  ConcatFst<Arc> *Copy(bool safe = false) const override {
    return new ConcatFst<Arc>(*this, safe);
  }

 private:
  using ImplToFst<internal::RationalFstImpl<Arc>>::GetImpl;
  using ImplToFst<internal::RationalFstImpl<Arc>>::GetMutableImpl;
};

// Specialization for ConcatFst.
template <class Arc>
class StateIterator<ConcatFst<Arc>> : public StateIterator<RationalFst<Arc>> {
 public:
  explicit StateIterator(const ConcatFst<Arc> &fst)
      : StateIterator<RationalFst<Arc>>(fst) {}
};

// Specialization for ConcatFst.
template <class Arc>
class ArcIterator<ConcatFst<Arc>> : public ArcIterator<RationalFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;

  ArcIterator(const ConcatFst<Arc> &fst, StateId s)
      : ArcIterator<RationalFst<Arc>>(fst, s) {}
};

// Useful alias when using StdArc.
using StdConcatFst = ConcatFst<StdArc>;

}  // namespace fst

#endif  // FST_CONCAT_H_
0
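To make the two call styles in this header concrete, here is an illustrative sketch (not from the repository): the destructive Concat() overload versus the delayed ConcatFst, both using the StdConcatFst alias defined above.

#include <fst/fstlib.h>

void ConcatExamples(const fst::StdVectorFst &a, const fst::StdVectorFst &b) {
  // Destructive: ab ends up holding the concatenation a . b.
  fst::StdVectorFst ab = a;
  fst::Concat(&ab, b);

  // Delayed: states and arcs are computed on demand as cab is visited.
  fst::StdConcatFst cab(a, b);
  fst::StdVectorFst expanded(cab);  // force expansion if needed
}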
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers/rocm/rocm_provider_factory.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "onnxruntime_c_api.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * \param device_id hip device id, starts from zero.
 * \param gpu_mem_limit upper bound, in bytes, on the device memory arena.
 */
ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_ROCM,
               _In_ OrtSessionOptions* options, int device_id,
               size_t gpu_mem_limit);

#ifdef __cplusplus
}
#endif
0
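For context, this legacy helper is applied to an OrtSessionOptions before creating a session. The sketch below is an assumption-laden illustration, not repository code: error handling is minimal, device 0 is arbitrary, and SIZE_MAX is used to mean "no explicit arena cap", mirroring common usage of the analogous CUDA helper.

#include <cstdint>

#include "onnxruntime_c_api.h"

void EnableRocm(const OrtApi* ort, OrtSessionOptions* session_options) {
  // Register the ROCm execution provider on device 0 with no explicit
  // memory-arena cap; ORT falls back to CPU for unsupported ops.
  OrtStatus* status =
      OrtSessionOptionsAppendExecutionProvider_ROCM(session_options, 0, SIZE_MAX);
  if (status != nullptr) ort->ReleaseStatus(status);
}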
coqui_public_repos/STT/native_client/kenlm/util
coqui_public_repos/STT/native_client/kenlm/util/double-conversion/cached-powers.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
#define DOUBLE_CONVERSION_CACHED_POWERS_H_

#include "diy-fp.h"

namespace kenlm_double_conversion {

namespace PowersOfTenCache {

  // Not all powers of ten are cached. The decimal exponent of two neighboring
  // cached numbers will differ by kDecimalExponentDistance.
  static const int kDecimalExponentDistance = 8;

  static const int kMinDecimalExponent = -348;
  static const int kMaxDecimalExponent = 340;

  // Returns a cached power-of-ten with a binary exponent in the range
  // [min_exponent; max_exponent] (boundaries included).
  void GetCachedPowerForBinaryExponentRange(int min_exponent,
                                            int max_exponent,
                                            DiyFp* power,
                                            int* decimal_exponent);

  // Returns a cached power of ten x ~= 10^k such that
  //   k <= decimal_exponent < k + kCachedPowersDecimalDistance.
  // The given decimal_exponent must satisfy
  //   kMinDecimalExponent <= requested_exponent, and
  //   requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
  void GetCachedPowerForDecimalExponent(int requested_exponent,
                                        DiyFp* power,
                                        int* found_exponent);

}  // namespace PowersOfTenCache

}  // namespace kenlm_double_conversion

#endif  // DOUBLE_CONVERSION_CACHED_POWERS_H_
0
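A hedged sketch of the decimal-exponent lookup declared above; it assumes the DiyFp accessors f() and e() from the neighboring diy-fp.h, and the function name is hypothetical.

#include "cached-powers.h"
#include "diy-fp.h"

// Fetch a cached power of ten near 10^30 and report what was found.
// found_exponent satisfies found <= 30 < found + kDecimalExponentDistance.
void LookupExample() {
  using namespace kenlm_double_conversion;
  DiyFp power;
  int found_exponent;
  PowersOfTenCache::GetCachedPowerForDecimalExponent(30, &power,
                                                     &found_exponent);
  // power.f() holds the 64-bit significand and power.e() the binary
  // exponent, so power ~= f * 2^e ~= 10^found_exponent.
}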
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/framework/buffer_deleter.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/framework/allocator.h"

namespace onnxruntime {

// TODO: Do we need this class or is IAllocator::MakeUniquePtr sufficient/better
class BufferDeleter {
 public:
  BufferDeleter() : alloc_(nullptr) {}
  BufferDeleter(AllocatorPtr alloc) : alloc_(alloc) {}

  void operator()(void* p) const {
    if (alloc_)
      alloc_->Free(p);
  }

 private:
  // TODO: we may need to consider the lifetime of alloc_ carefully.
  // alloc_ is the allocator that was used to allocate the buffer, and it
  // needs to stay alive together with the unique_ptr. If it is one of our
  // internal allocators, that is fine, as our allocators are globally
  // managed. But if it is provided by the user, the user needs to be very
  // careful about its lifetime. A weak_ptr may be a choice to reduce the
  // impact, but that requires changing our current allocator manager to
  // use shared_ptr. Will revisit it later.
  AllocatorPtr alloc_;
};

using BufferUniquePtr = std::unique_ptr<void, BufferDeleter>;
using BufferNakedPtr = void*;
}  // namespace onnxruntime
0
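The intended ownership pattern is: allocate from an IAllocator and bind that same allocator into the deleter, so the memory is returned to the arena it came from. An illustrative sketch (the helper name is hypothetical):

#include "core/framework/allocator.h"
#include "core/framework/buffer_deleter.h"

namespace onnxruntime {

BufferUniquePtr MakeScratchBuffer(AllocatorPtr alloc, size_t num_bytes) {
  // The buffer is freed through alloc->Free() when the unique_ptr dies,
  // so the allocator must outlive the returned pointer.
  return BufferUniquePtr(alloc->Alloc(num_bytes), BufferDeleter(alloc));
}

}  // namespace onnxruntime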
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/string.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Utilities to convert strings into FSTs.

#ifndef FST_STRING_H_
#define FST_STRING_H_

#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include <fst/flags.h>
#include <fst/log.h>

#include <fst/compact-fst.h>
#include <fst/icu.h>
#include <fst/mutable-fst.h>
#include <fst/util.h>

DECLARE_string(fst_field_separator);

namespace fst {

enum StringTokenType { SYMBOL = 1, BYTE = 2, UTF8 = 3 };

namespace internal {

template <class Label>
bool ConvertSymbolToLabel(const char *str, const SymbolTable *syms,
                          Label unknown_label, bool allow_negative,
                          Label *output) {
  int64 n;
  if (syms) {
    n = syms->Find(str);
    if ((n == -1) && (unknown_label != kNoLabel)) n = unknown_label;
    if (n == -1 || (!allow_negative && n < 0)) {
      VLOG(1) << "ConvertSymbolToLabel: Symbol \"" << str
              << "\" is not mapped to any integer label, symbol table = "
              << syms->Name();
      return false;
    }
  } else {
    char *p;
    n = strtoll(str, &p, 10);
    if (p < str + strlen(str) || (!allow_negative && n < 0)) {
      VLOG(1) << "ConvertSymbolToLabel: Bad label integer "
              << "= \"" << str << "\"";
      return false;
    }
  }
  *output = n;
  return true;
}

template <class Label>
bool ConvertStringToLabels(const string &str, StringTokenType token_type,
                           const SymbolTable *syms, Label unknown_label,
                           bool allow_negative, std::vector<Label> *labels) {
  labels->clear();
  if (token_type == StringTokenType::BYTE) {
    for (const char c : str) labels->push_back(c);
  } else if (token_type == StringTokenType::UTF8) {
    return UTF8StringToLabels(str, labels);
  } else {
    std::unique_ptr<char[]> c_str(new char[str.size() + 1]);
    str.copy(c_str.get(), str.size());
    c_str[str.size()] = 0;
    std::vector<char *> vec;
    const string separator = "\n" + FLAGS_fst_field_separator;
    SplitString(c_str.get(), separator.c_str(), &vec, true);
    for (const char *c : vec) {
      Label label;
      if (!ConvertSymbolToLabel(c, syms, unknown_label, allow_negative,
                                &label)) {
        return false;
      }
      labels->push_back(label);
    }
  }
  return true;
}

}  // namespace internal

// Functor for compiling a string into an FST.
template <class Arc>
class StringCompiler {
 public:
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  explicit StringCompiler(StringTokenType token_type,
                          const SymbolTable *syms = nullptr,
                          Label unknown_label = kNoLabel,
                          bool allow_negative = false)
      : token_type_(token_type),
        syms_(syms),
        unknown_label_(unknown_label),
        allow_negative_(allow_negative) {}

  // Compiles string into an FST.
  template <class FST>
  bool operator()(const string &str, FST *fst) const {
    std::vector<Label> labels;
    if (!internal::ConvertStringToLabels(str, token_type_, syms_,
                                         unknown_label_, allow_negative_,
                                         &labels)) {
      return false;
    }
    Compile(labels, fst);
    return true;
  }

  template <class FST>
  bool operator()(const string &str, FST *fst, Weight weight) const {
    std::vector<Label> labels;
    if (!internal::ConvertStringToLabels(str, token_type_, syms_,
                                         unknown_label_, allow_negative_,
                                         &labels)) {
      return false;
    }
    Compile(labels, fst, std::move(weight));
    return true;
  }

 private:
  void Compile(const std::vector<Label> &labels, MutableFst<Arc> *fst,
               Weight weight = Weight::One()) const {
    fst->DeleteStates();
    while (fst->NumStates() <= labels.size()) fst->AddState();
    for (StateId i = 0; i < labels.size(); ++i) {
      fst->AddArc(i, Arc(labels[i], labels[i], Weight::One(), i + 1));
    }
    fst->SetStart(0);
    fst->SetFinal(labels.size(), std::move(weight));
  }

  template <class Unsigned>
  void Compile(const std::vector<Label> &labels,
               CompactStringFst<Arc, Unsigned> *fst) const {
    fst->SetCompactElements(labels.begin(), labels.end());
  }

  template <class Unsigned>
  void Compile(const std::vector<Label> &labels,
               CompactWeightedStringFst<Arc, Unsigned> *fst,
               const Weight &weight = Weight::One()) const {
    std::vector<std::pair<Label, Weight>> compacts;
    compacts.reserve(labels.size() + 1);
    for (StateId i = 0; i < static_cast<StateId>(labels.size()) - 1; ++i) {
      compacts.emplace_back(labels[i], Weight::One());
    }
    compacts.emplace_back(!labels.empty() ? labels.back() : kNoLabel, weight);
    fst->SetCompactElements(compacts.begin(), compacts.end());
  }

  const StringTokenType token_type_;
  const SymbolTable *syms_;    // Symbol table (used when token type is symbol).
  const Label unknown_label_;  // Label for token missing from symbol table.
  const bool allow_negative_;  // Negative labels allowed?

  StringCompiler(const StringCompiler &) = delete;
  StringCompiler &operator=(const StringCompiler &) = delete;
};

// Functor for printing a string FST as a string.
template <class Arc>
class StringPrinter {
 public:
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  explicit StringPrinter(StringTokenType token_type,
                         const SymbolTable *syms = nullptr)
      : token_type_(token_type), syms_(syms) {}

  // Converts the FST into a string.
  bool operator()(const Fst<Arc> &fst, string *result) {
    if (!FstToLabels(fst)) {
      VLOG(1) << "StringPrinter::operator(): FST is not a string";
      return false;
    }
    result->clear();
    if (token_type_ == StringTokenType::SYMBOL) {
      std::stringstream sstrm;
      for (size_t i = 0; i < labels_.size(); ++i) {
        if (i) sstrm << *(FLAGS_fst_field_separator.rbegin());
        if (!PrintLabel(labels_[i], sstrm)) return false;
      }
      *result = sstrm.str();
    } else if (token_type_ == StringTokenType::BYTE) {
      result->reserve(labels_.size());
      for (size_t i = 0; i < labels_.size(); ++i) result->push_back(labels_[i]);
    } else if (token_type_ == StringTokenType::UTF8) {
      return LabelsToUTF8String(labels_, result);
    } else {
      VLOG(1) << "StringPrinter::operator(): Unknown token type: "
              << token_type_;
      return false;
    }
    return true;
  }

 private:
  bool FstToLabels(const Fst<Arc> &fst) {
    labels_.clear();
    auto s = fst.Start();
    if (s == kNoStateId) {
      VLOG(2) << "StringPrinter::FstToLabels: Invalid starting state for "
              << "string FST";
      return false;
    }
    while (fst.Final(s) == Weight::Zero()) {
      ArcIterator<Fst<Arc>> aiter(fst, s);
      if (aiter.Done()) {
        VLOG(2) << "StringPrinter::FstToLabels: String FST traversal does "
                << "not reach final state";
        return false;
      }
      const auto &arc = aiter.Value();
      labels_.push_back(arc.olabel);
      s = arc.nextstate;
      if (s == kNoStateId) {
        VLOG(2) << "StringPrinter::FstToLabels: Transition to invalid state";
        return false;
      }
      aiter.Next();
      if (!aiter.Done()) {
        VLOG(2) << "StringPrinter::FstToLabels: State with multiple "
                << "outgoing arcs found";
        return false;
      }
    }
    return true;
  }

  bool PrintLabel(Label label, std::ostream &ostrm) {
    if (syms_) {
      const auto symbol = syms_->Find(label);
      if (symbol == "") {
        VLOG(2) << "StringPrinter::PrintLabel: Integer " << label << " is not "
                << "mapped to any textual symbol, symbol table = "
                << syms_->Name();
        return false;
      }
      ostrm << symbol;
    } else {
      ostrm << label;
    }
    return true;
  }

  const StringTokenType token_type_;
  const SymbolTable *syms_;    // Symbol table (used when token type is symbol).
  std::vector<Label> labels_;  // Input FST labels.

  StringPrinter(const StringPrinter &) = delete;
  StringPrinter &operator=(const StringPrinter &) = delete;
};

}  // namespace fst

#endif  // FST_STRING_H_
0
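A round-trip sketch of the two functors above, using BYTE tokenization so no symbol table is required (illustrative only, not repository code):

#include <string>

#include <fst/fstlib.h>
#include <fst/string.h>

bool RoundTrip() {
  // Compile "hello" into a linear FST, one arc per byte.
  fst::StringCompiler<fst::StdArc> compiler(fst::StringTokenType::BYTE);
  fst::StdVectorFst fst;
  if (!compiler("hello", &fst)) return false;

  // Print it back; recovered should equal the original string.
  fst::StringPrinter<fst::StdArc> printer(fst::StringTokenType::BYTE);
  std::string recovered;
  return printer(fst, &recovered) && recovered == "hello";
}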
coqui_public_repos/STT
coqui_public_repos/STT/native_client/multistrap_raspbian_buster.conf
[General]
arch=armhf
noauth=false
unpack=true
debootstrap=Raspbian
aptsources=Raspbian
cleanup=true

[Raspbian]
packages=apt libc6 libc6-dev libffi-dev libstdc++-6-dev linux-libc-dev libpython3.7-dev libsox-dev python3-numpy python3-setuptools
source=http://raspbian.raspberrypi.org/raspbian/
keyring=raspbian-archive-keyring
components=main
suite=buster
0
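For context, a configuration like this is typically consumed with an invocation along the lines of multistrap -f multistrap_raspbian_buster.conf -d <target-sysroot> (the target directory is a placeholder), which bootstraps an armhf Raspbian buster root filesystem whose -dev packages can then back cross-compilation of the native client.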
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/equal.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Function to test equality of two FSTs.

#ifndef FST_EQUAL_H_
#define FST_EQUAL_H_

#include <fst/log.h>

#include <fst/fst.h>
#include <fst/test-properties.h>


namespace fst {

constexpr uint32 kEqualFsts = 0x0001;
constexpr uint32 kEqualFstTypes = 0x0002;
constexpr uint32 kEqualCompatProperties = 0x0004;
constexpr uint32 kEqualCompatSymbols = 0x0008;
constexpr uint32 kEqualAll =
    kEqualFsts | kEqualFstTypes | kEqualCompatProperties | kEqualCompatSymbols;

// Tests if two FSTs have the same states and arcs in the same order (when
// etype & kEqualFsts). Optionally also checks equality of FST types
// (etype & kEqualFstTypes), compatibility of stored properties
// (etype & kEqualCompatProperties), and compatibility of symbol tables
// (etype & kEqualCompatSymbols).
template <class Arc>
bool Equal(const Fst<Arc> &fst1, const Fst<Arc> &fst2, float delta = kDelta,
           uint32 etype = kEqualFsts) {
  if ((etype & kEqualFstTypes) && (fst1.Type() != fst2.Type())) {
    VLOG(1) << "Equal: Mismatched FST types (" << fst1.Type() << " != "
            << fst2.Type() << ")";
    return false;
  }
  if ((etype & kEqualCompatProperties) &&
      !CompatProperties(fst1.Properties(kCopyProperties, false),
                        fst2.Properties(kCopyProperties, false))) {
    VLOG(1) << "Equal: Properties not compatible";
    return false;
  }
  if (etype & kEqualCompatSymbols) {
    if (!CompatSymbols(fst1.InputSymbols(), fst2.InputSymbols(), false)) {
      VLOG(1) << "Equal: Input symbols not compatible";
      return false;
    }
    if (!CompatSymbols(fst1.OutputSymbols(), fst2.OutputSymbols(), false)) {
      VLOG(1) << "Equal: Output symbols not compatible";
      return false;
    }
  }
  if (!(etype & kEqualFsts)) return true;
  if (fst1.Start() != fst2.Start()) {
    VLOG(1) << "Equal: Mismatched start states (" << fst1.Start() << " != "
            << fst2.Start() << ")";
    return false;
  }
  StateIterator<Fst<Arc>> siter1(fst1);
  StateIterator<Fst<Arc>> siter2(fst2);
  while (!siter1.Done() || !siter2.Done()) {
    if (siter1.Done() || siter2.Done()) {
      VLOG(1) << "Equal: Mismatched number of states";
      return false;
    }
    const auto s1 = siter1.Value();
    const auto s2 = siter2.Value();
    if (s1 != s2) {
      VLOG(1) << "Equal: Mismatched states (" << s1 << " != " << s2 << ")";
      return false;
    }
    const auto &final1 = fst1.Final(s1);
    const auto &final2 = fst2.Final(s2);
    if (!ApproxEqual(final1, final2, delta)) {
      VLOG(1) << "Equal: Mismatched final weights at state " << s1 << " ("
              << final1 << " != " << final2 << ")";
      return false;
    }
    ArcIterator<Fst<Arc>> aiter1(fst1, s1);
    ArcIterator<Fst<Arc>> aiter2(fst2, s2);
    for (auto a = 0; !aiter1.Done() || !aiter2.Done(); ++a) {
      if (aiter1.Done() || aiter2.Done()) {
        VLOG(1) << "Equal: Mismatched number of arcs at state " << s1;
        return false;
      }
      const auto &arc1 = aiter1.Value();
      const auto &arc2 = aiter2.Value();
      if (arc1.ilabel != arc2.ilabel) {
        VLOG(1) << "Equal: Mismatched arc input labels at state " << s1
                << ", arc " << a << " (" << arc1.ilabel << " != "
                << arc2.ilabel << ")";
        return false;
      } else if (arc1.olabel != arc2.olabel) {
        VLOG(1) << "Equal: Mismatched arc output labels at state " << s1
                << ", arc " << a << " (" << arc1.olabel << " != "
                << arc2.olabel << ")";
        return false;
      } else if (!ApproxEqual(arc1.weight, arc2.weight, delta)) {
        VLOG(1) << "Equal: Mismatched arc weights at state " << s1
                << ", arc " << a << " (" << arc1.weight << " != "
                << arc2.weight << ")";
        return false;
      } else if (arc1.nextstate != arc2.nextstate) {
        VLOG(1) << "Equal: Mismatched next state at state " << s1
                << ", arc " << a << " (" << arc1.nextstate << " != "
                << arc2.nextstate << ")";
        return false;
      }
      aiter1.Next();
      aiter2.Next();
    }
    // Sanity checks: should never fail.
    if (fst1.NumArcs(s1) != fst2.NumArcs(s2)) {
      FSTERROR() << "Equal: Inconsistent arc counts at state " << s1 << " ("
                 << fst1.NumArcs(s1) << " != " << fst2.NumArcs(s2) << ")";
      return false;
    }
    if (fst1.NumInputEpsilons(s1) != fst2.NumInputEpsilons(s2)) {
      FSTERROR() << "Equal: Inconsistent input epsilon counts at state " << s1
                 << " (" << fst1.NumInputEpsilons(s1) << " != "
                 << fst2.NumInputEpsilons(s2) << ")";
      return false;
    }
    if (fst1.NumOutputEpsilons(s1) != fst2.NumOutputEpsilons(s2)) {
      FSTERROR() << "Equal: Inconsistent output epsilon counts at state " << s1
                 << " (" << fst1.NumOutputEpsilons(s1) << " != "
                 << fst2.NumOutputEpsilons(s2) << ")";
      return false;
    }
    siter1.Next();
    siter2.Next();
  }
  return true;
}

}  // namespace fst

#endif  // FST_EQUAL_H_
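// Usage sketch (illustrative, not part of the original header): compares two
// StdVectorFsts, first for structural equality only, then also requiring
// matching FST types, compatible properties, and compatible symbol tables.
// The FSTs built here are hypothetical stand-ins for real inputs.
//
//   #include <fst/equal.h>
//   #include <fst/vector-fst.h>
//
//   fst::StdVectorFst a;
//   a.AddState();
//   a.SetStart(0);
//   a.SetFinal(0, fst::TropicalWeight::One());
//   fst::StdVectorFst b(a);
//
//   bool same = fst::Equal(a, b);                                // states and arcs only
//   bool strict = fst::Equal(a, b, fst::kDelta, fst::kEqualAll);  // types, props, symbols too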
0
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/models/tortoise.py
import os
import random
from contextlib import contextmanager
from dataclasses import dataclass
from time import time

import torch
import torch.nn.functional as F
import torchaudio
from coqpit import Coqpit
from tqdm import tqdm

from TTS.tts.layers.tortoise.arch_utils import TorchMelSpectrogram
from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, load_voice, wav_to_univnet_mel
from TTS.tts.layers.tortoise.autoregressive import UnifiedVoice
from TTS.tts.layers.tortoise.classifier import AudioMiniEncoderWithClassifierHead
from TTS.tts.layers.tortoise.clvp import CLVP
from TTS.tts.layers.tortoise.diffusion import SpacedDiffusion, get_named_beta_schedule, space_timesteps
from TTS.tts.layers.tortoise.diffusion_decoder import DiffusionTts
from TTS.tts.layers.tortoise.random_latent_generator import RandomLatentConverter
from TTS.tts.layers.tortoise.tokenizer import VoiceBpeTokenizer
from TTS.tts.layers.tortoise.vocoder import VocConf, VocType
from TTS.tts.layers.tortoise.wav2vec_alignment import Wav2VecAlignment
from TTS.tts.models.base_tts import BaseTTS


def pad_or_truncate(t, length):
    """
    Utility function for forcing <t> to have the specified sequence length, whether by clipping it or padding it with 0s.
    """
    tp = t[..., :length]
    if t.shape[-1] == length:
        tp = t
    elif t.shape[-1] < length:
        tp = F.pad(t, (0, length - t.shape[-1]))
    return tp


def deterministic_state(seed=None):
    """
    Sets the random seeds that tortoise uses to the current time() and returns that seed so results can be reproduced.
    """
    seed = int(time()) if seed is None else seed
    torch.manual_seed(seed)
    random.seed(seed)
    # Can't currently set this because of CUBLAS. TODO: potentially enable it if necessary.
    # torch.use_deterministic_algorithms(True)
    return seed


def load_discrete_vocoder_diffuser(
    trained_diffusion_steps=4000,
    desired_diffusion_steps=200,
    cond_free=True,
    cond_free_k=1,
    sampler="ddim",
):
    """
    Helper function to load a GaussianDiffusion instance configured for use as a vocoder.
    """
    return SpacedDiffusion(
        use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]),
        model_mean_type="epsilon",
        model_var_type="learned_range",
        loss_type="mse",
        betas=get_named_beta_schedule("linear", trained_diffusion_steps),
        conditioning_free=cond_free,
        conditioning_free_k=cond_free_k,
        sampler=sampler,
    )


def format_conditioning(clip, cond_length=132300, device="cuda", **kwargs):
    """
    Converts the given conditioning signal to a MEL spectrogram and clips it as expected by the models.
    """
    gap = clip.shape[-1] - cond_length
    if gap < 0:
        clip = F.pad(clip, pad=(0, abs(gap)))
    elif gap > 0:
        rand_start = random.randint(0, gap)
        clip = clip[:, rand_start : rand_start + cond_length]
    mel_clip = TorchMelSpectrogram(**kwargs)(clip.unsqueeze(0)).squeeze(0)
    return mel_clip.unsqueeze(0).to(device)
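# Minimal shape sketch (illustrative, not part of the original file; the tensor
# below is hypothetical): pad_or_truncate right-pads the last axis with zeros
# or clips it, so
#
#   t = torch.randn(1, 5)
#   pad_or_truncate(t, 8).shape  # -> torch.Size([1, 8])
#   pad_or_truncate(t, 3).shape  # -> torch.Size([1, 3])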
def fix_autoregressive_output(codes, stop_token, complain=True):
    """
    This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was
    trained on and what the autoregressive code generator creates (which has no padding or end).
    This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with
    a different DVAE. This can be inferred by feeding an audio clip padded with lots of zeros on the end through the DVAE
    and copying out the last few codes.

    Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar.
    """
    # Strip off the autoregressive stop token and add padding.
    stop_token_indices = (codes == stop_token).nonzero()
    if len(stop_token_indices) == 0:
        if complain:
            print(
                "No stop tokens found in one of the generated voice clips. This typically means the spoken audio is "
                "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, "
                "try breaking up your input text."
            )
        return codes
    codes[stop_token_indices] = 83
    stm = stop_token_indices.min().item()
    codes[stm:] = 83
    if stm - 3 < codes.shape[0]:
        codes[-3] = 45
        codes[-2] = 45
        codes[-1] = 248
    return codes


def do_spectrogram_diffusion(
    diffusion_model,
    diffuser,
    latents,
    conditioning_latents,
    temperature=1,
    verbose=True,
):
    """
    Uses the specified diffusion model to convert discrete codes into a spectrogram.
    """
    with torch.no_grad():
        output_seq_len = (
            latents.shape[1] * 4 * 24000 // 22050
        )  # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal.
        output_shape = (latents.shape[0], 100, output_seq_len)
        precomputed_embeddings = diffusion_model.timestep_independent(
            latents, conditioning_latents, output_seq_len, False
        )

        noise = torch.randn(output_shape, device=latents.device) * temperature
        mel = diffuser.sample_loop(
            diffusion_model,
            output_shape,
            noise=noise,
            model_kwargs={"precomputed_aligned_embeddings": precomputed_embeddings},
            progress=verbose,
        )
        return denormalize_tacotron_mel(mel)[:, :, :output_seq_len]


def classify_audio_clip(clip, model_dir):
    """
    Returns whether or not Tortoise's classifier thinks the given clip came from Tortoise.
    :param clip: torch tensor containing audio waveform data (get it from load_audio)
    :return: True if the clip was classified as coming from Tortoise and false if it was classified as real.
    """
    classifier = AudioMiniEncoderWithClassifierHead(
        2,
        spec_dim=1,
        embedding_dim=512,
        depth=5,
        downsample_factor=4,
        resnet_blocks=2,
        attn_blocks=4,
        num_attn_heads=4,
        base_channels=32,
        dropout=0,
        kernel_size=5,
        distribute_zero_label=False,
    )
    classifier.load_state_dict(torch.load(os.path.join(model_dir, "classifier.pth"), map_location=torch.device("cpu")))
    clip = clip.cpu().unsqueeze(0)
    results = F.softmax(classifier(clip), dim=-1)
    return results[0][0]


def pick_best_batch_size_for_gpu():
    """
    Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give
    you a good shot.
    """
    batch_size = 1
    if torch.cuda.is_available():
        _, available = torch.cuda.mem_get_info()
        availableGb = available / (1024**3)
        if availableGb > 14:
            batch_size = 16
        elif availableGb > 10:
            batch_size = 8
        elif availableGb > 7:
            batch_size = 4
    return batch_size


@dataclass
class TortoiseAudioConfig(Coqpit):
    sample_rate: int = 22050
    diffusion_sample_rate: int = 24000
    output_sample_rate: int = 24000


@dataclass
class TortoiseArgs(Coqpit):
    """A dataclass to represent Tortoise model arguments that define the model structure.

    Args:
        autoregressive_batch_size (int): The size of the auto-regressive batch.
        enable_redaction (bool, optional): Whether to enable redaction. Defaults to True.
        high_vram (bool, optional): Whether to use high VRAM. Defaults to False.
        kv_cache (bool, optional): Whether to use the kv_cache. Defaults to True.
        ar_checkpoint (str, optional): The checkpoint for the autoregressive model. Defaults to None.
        clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None.
        diff_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None.
        num_chars (int, optional): The maximum number of characters to generate. Defaults to 255.
        vocoder (VocType, optional): The vocoder to use for synthesis. Defaults to VocConf.Univnet.

        For UnifiedVoice model:
        ar_max_mel_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604.
        ar_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402.
        ar_max_conditioning_inputs (int, optional): The maximum conditioning inputs for the autoregressive model. Defaults to 2.
        ar_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30.
        ar_model_dim (int, optional): The model dimension for the autoregressive model. Defaults to 1024.
        ar_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16.
        ar_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255.
        ar_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255.
        ar_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False.
        ar_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False.

        For DiffTTS model:
        diff_model_channels (int, optional): The number of channels for the DiffTTS model. Defaults to 1024.
        diff_num_layers (int, optional): The number of layers for the DiffTTS model. Defaults to 10.
        diff_in_channels (int, optional): The input channels for the DiffTTS model. Defaults to 100.
        diff_out_channels (int, optional): The output channels for the DiffTTS model. Defaults to 200.
        diff_in_latent_channels (int, optional): The input latent channels for the DiffTTS model. Defaults to 1024.
        diff_in_tokens (int, optional): The input tokens for the DiffTTS model. Defaults to 8193.
        diff_dropout (int, optional): The dropout percentage for the DiffTTS model. Defaults to 0.
        diff_use_fp16 (bool, optional): Whether to use fp16 for the DiffTTS model. Defaults to False.
        diff_num_heads (int, optional): The number of heads for the DiffTTS model. Defaults to 16.
        diff_layer_drop (int, optional): The layer dropout percentage for the DiffTTS model. Defaults to 0.
        diff_unconditioned_percentage (int, optional): The percentage of unconditioned inputs for the DiffTTS model. Defaults to 0.

        For ConditionalLatentVariablePerseq model:
        clvp_dim_text (int): The dimension of the text input for the CLVP module. Defaults to 768.
        clvp_dim_speech (int): The dimension of the speech input for the CLVP module. Defaults to 768.
        clvp_dim_latent (int): The dimension of the latent representation for the CLVP module. Defaults to 768.
        clvp_num_text_tokens (int): The number of text tokens used by the CLVP module. Defaults to 256.
        clvp_text_enc_depth (int): The depth of the text encoder in the CLVP module. Defaults to 20.
        clvp_text_seq_len (int): The maximum sequence length of the text input for the CLVP module. Defaults to 350.
        clvp_text_heads (int): The number of attention heads used by the text encoder in the CLVP module. Defaults to 12.
        clvp_num_speech_tokens (int): The number of speech tokens used by the CLVP module. Defaults to 8192.
        clvp_speech_enc_depth (int): The depth of the speech encoder in the CLVP module. Defaults to 20.
        clvp_speech_heads (int): The number of attention heads used by the speech encoder in the CLVP module. Defaults to 12.
        clvp_speech_seq_len (int): The maximum sequence length of the speech input for the CLVP module. Defaults to 430.
        clvp_use_xformers (bool): A flag indicating whether the CLVP module uses xformers attention. Defaults to True.
        duration_const (int): A constant value used in the model. Defaults to 102400.
    """

    autoregressive_batch_size: int = 1
    enable_redaction: bool = False
    high_vram: bool = False
    kv_cache: bool = True
    ar_checkpoint: str = None
    clvp_checkpoint: str = None
    diff_checkpoint: str = None
    num_chars: int = 255
    vocoder: VocType = VocConf.Univnet

    # UnifiedVoice params
    ar_max_mel_tokens: int = 604
    ar_max_text_tokens: int = 402
    ar_max_conditioning_inputs: int = 2
    ar_layers: int = 30
    ar_model_dim: int = 1024
    ar_heads: int = 16
    ar_number_text_tokens: int = 255
    ar_start_text_token: int = 255
    ar_checkpointing: bool = False
    ar_train_solo_embeddings: bool = False

    # DiffTTS params
    diff_model_channels: int = 1024
    diff_num_layers: int = 10
    diff_in_channels: int = 100
    diff_out_channels: int = 200
    diff_in_latent_channels: int = 1024
    diff_in_tokens: int = 8193
    diff_dropout: int = 0
    diff_use_fp16: bool = False
    diff_num_heads: int = 16
    diff_layer_drop: int = 0
    diff_unconditioned_percentage: int = 0

    # clvp params
    clvp_dim_text: int = 768
    clvp_dim_speech: int = 768
    clvp_dim_latent: int = 768
    clvp_num_text_tokens: int = 256
    clvp_text_enc_depth: int = 20
    clvp_text_seq_len: int = 350
    clvp_text_heads: int = 12
    clvp_num_speech_tokens: int = 8192
    clvp_speech_enc_depth: int = 20
    clvp_speech_heads: int = 12
    clvp_speech_seq_len: int = 430
    clvp_use_xformers: bool = True
    # constants
    duration_const: int = 102400


class Tortoise(BaseTTS):
    """Tortoise model class.

    Currently only supports inference.

    Examples:
        >>> from TTS.tts.configs.tortoise_config import TortoiseConfig
        >>> from TTS.tts.models.tortoise import Tortoise
        >>> config = TortoiseConfig()
        >>> model = Tortoise.init_from_config(config)
        >>> model.load_checkpoint(config, checkpoint_dir="paths/to/models_dir/", eval=True)
    """

    def __init__(self, config: Coqpit):
        super().__init__(config, ap=None, tokenizer=None)
        self.mel_norm_path = None
        self.config = config
        self.ar_checkpoint = self.args.ar_checkpoint
        self.diff_checkpoint = self.args.diff_checkpoint  # TODO: check if this is even needed
        self.models_dir = config.model_dir
        self.autoregressive_batch_size = (
            pick_best_batch_size_for_gpu()
            if self.args.autoregressive_batch_size is None
            else self.args.autoregressive_batch_size
        )
        self.enable_redaction = self.args.enable_redaction
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self.enable_redaction:
            self.aligner = Wav2VecAlignment()

        self.tokenizer = VoiceBpeTokenizer()

        self.autoregressive = UnifiedVoice(
            max_mel_tokens=self.args.ar_max_mel_tokens,
            max_text_tokens=self.args.ar_max_text_tokens,
            max_conditioning_inputs=self.args.ar_max_conditioning_inputs,
            layers=self.args.ar_layers,
            model_dim=self.args.ar_model_dim,
            heads=self.args.ar_heads,
            number_text_tokens=self.args.ar_number_text_tokens,
            start_text_token=self.args.ar_start_text_token,
            checkpointing=self.args.ar_checkpointing,
            train_solo_embeddings=self.args.ar_train_solo_embeddings,
        ).cpu()

        self.diffusion = DiffusionTts(
            model_channels=self.args.diff_model_channels,
            num_layers=self.args.diff_num_layers,
            in_channels=self.args.diff_in_channels,
            out_channels=self.args.diff_out_channels,
            in_latent_channels=self.args.diff_in_latent_channels,
            in_tokens=self.args.diff_in_tokens,
            dropout=self.args.diff_dropout,
            use_fp16=self.args.diff_use_fp16,
            num_heads=self.args.diff_num_heads,
            layer_drop=self.args.diff_layer_drop,
            unconditioned_percentage=self.args.diff_unconditioned_percentage,
        ).cpu()

        self.clvp = CLVP(
            dim_text=self.args.clvp_dim_text,
            dim_speech=self.args.clvp_dim_speech,
            dim_latent=self.args.clvp_dim_latent,
            num_text_tokens=self.args.clvp_num_text_tokens,
            text_enc_depth=self.args.clvp_text_enc_depth,
            text_seq_len=self.args.clvp_text_seq_len,
            text_heads=self.args.clvp_text_heads,
            num_speech_tokens=self.args.clvp_num_speech_tokens,
            speech_enc_depth=self.args.clvp_speech_enc_depth,
            speech_heads=self.args.clvp_speech_heads,
            speech_seq_len=self.args.clvp_speech_seq_len,
            use_xformers=self.args.clvp_use_xformers,
        ).cpu()

        self.vocoder = self.args.vocoder.value.constructor().cpu()

        # Random latent generators (RLGs) are loaded lazily.
        self.rlg_auto = None
        self.rlg_diffusion = None

        if self.args.high_vram:
            self.autoregressive = self.autoregressive.to(self.device)
            self.diffusion = self.diffusion.to(self.device)
            self.clvp = self.clvp.to(self.device)
            self.vocoder = self.vocoder.to(self.device)
        self.high_vram = self.args.high_vram

    @contextmanager
    def temporary_cuda(self, model):
        if self.high_vram:
            yield model
        else:
            m = model.to(self.device)
            yield m
            m = model.cpu()

    def get_conditioning_latents(
        self,
        voice_samples,
        return_mels=False,
        latent_averaging_mode=0,
        original_tortoise=False,
    ):
        """
        Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent).
        These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties.

        :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data.
        :param latent_averaging_mode: 0/1/2 for following modes:
            0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples
            1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks
            2 - latents will be generated using (almost) entire voice samples, averaged per voice sample
        """
        assert latent_averaging_mode in [
            0,
            1,
            2,
        ], "latent_averaging mode has to be one of (0, 1, 2)"

        with torch.no_grad():
            voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples]

            auto_conds = []
            for ls in voice_samples:
                auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path))
            auto_conds = torch.stack(auto_conds, dim=1)
            with self.temporary_cuda(self.autoregressive) as ar:
                auto_latent = ar.get_conditioning(auto_conds)

            diffusion_conds = []

            DURS_CONST = self.args.duration_const
            for ls in voice_samples:
                # The diffuser operates at a sample rate of 24000 (except for the latent inputs)
                sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1]
                if latent_averaging_mode == 0:
                    sample = pad_or_truncate(sample, DURS_CONST)
                    cond_mel = wav_to_univnet_mel(
                        sample.to(self.device),
                        do_normalization=False,
                        device=self.device,
                    )
                    diffusion_conds.append(cond_mel)
                else:
                    from math import ceil

                    if latent_averaging_mode == 2:
                        temp_diffusion_conds = []
                    for chunk in range(ceil(sample.shape[1] / DURS_CONST)):
                        current_sample = sample[:, chunk * DURS_CONST : (chunk + 1) * DURS_CONST]
                        current_sample = pad_or_truncate(current_sample, DURS_CONST)
                        cond_mel = wav_to_univnet_mel(
                            current_sample.to(self.device),
                            do_normalization=False,
                            device=self.device,
                        )
                        if latent_averaging_mode == 1:
                            diffusion_conds.append(cond_mel)
                        elif latent_averaging_mode == 2:
                            temp_diffusion_conds.append(cond_mel)
                    if latent_averaging_mode == 2:
                        diffusion_conds.append(torch.stack(temp_diffusion_conds).mean(0))
            diffusion_conds = torch.stack(diffusion_conds, dim=1)
            with self.temporary_cuda(self.diffusion) as diffusion:
                diffusion_latent = diffusion.get_conditioning(diffusion_conds)

        if return_mels:
            return auto_latent, diffusion_latent, auto_conds, diffusion_conds
        return auto_latent, diffusion_latent

    def get_random_conditioning_latents(self):
        # Lazy-load the RLG models.
        if self.rlg_auto is None:
            self.rlg_auto = RandomLatentConverter(1024).eval()
            self.rlg_auto.load_state_dict(
                torch.load(
                    os.path.join(self.models_dir, "rlg_auto.pth"),
                    map_location=torch.device("cpu"),
                )
            )
            self.rlg_diffusion = RandomLatentConverter(2048).eval()
            self.rlg_diffusion.load_state_dict(
                torch.load(
                    os.path.join(self.models_dir, "rlg_diffuser.pth"),
                    map_location=torch.device("cpu"),
                )
            )
        with torch.no_grad():
            return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0]))

    def synthesize(self, text, config, speaker_id="random", voice_dirs=None, **kwargs):
        """Synthesize speech with the given input text.

        Args:
            text (str): Input text.
            config (TortoiseConfig): Config with inference parameters.
            speaker_id (str): One of the available speaker names. If `random`, it generates a random speaker.
            voice_dirs (List[str]): List of paths that host reference audio files for speakers. Defaults to None.
            **kwargs: Inference settings. See `inference()`.

        Returns:
            A dictionary of the output values with `wav` as output waveform,
            `deterministic_seed` as seed used at inference, `text_input` as text token IDs after tokenizer,
            `voice_samples` as samples used for cloning, `conditioning_latents` as latents used at inference.
        """
        speaker_id = "random" if speaker_id is None else speaker_id

        if voice_dirs is not None:
            voice_dirs = [voice_dirs]
            voice_samples, conditioning_latents = load_voice(speaker_id, voice_dirs)
        else:
            voice_samples, conditioning_latents = load_voice(speaker_id)

        outputs = self.inference_with_config(
            text, config, voice_samples=voice_samples, conditioning_latents=conditioning_latents, **kwargs
        )

        return_dict = {
            "wav": outputs["wav"],
            "deterministic_seed": outputs["deterministic_seed"],
            "text_inputs": outputs["text"],
            "voice_samples": outputs["voice_samples"],
            "conditioning_latents": outputs["conditioning_latents"],
        }

        return return_dict
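    # Sketch of precomputing and reusing conditioning latents (illustrative, not
    # part of the original file; `samples` is a hypothetical list of
    # (22.05 kHz, 24 kHz) waveform tensor pairs for one reference voice):
    #
    #   auto_latent, diff_latent = model.get_conditioning_latents(samples)
    #   out = model.inference("Hello.", conditioning_latents=(auto_latent, diff_latent))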
    def inference_with_config(self, text, config, **kwargs):
        """
        inference with config
        #TODO describe in detail
        """
        # Use generally found best tuning knobs for generation.
        settings = {
            "temperature": config.temperature,
            "length_penalty": config.length_penalty,
            "repetition_penalty": config.repetition_penalty,
            "top_p": config.top_p,
            "cond_free_k": config.cond_free_k,
            "diffusion_temperature": config.diffusion_temperature,
            "sampler": config.sampler,
        }
        # Presets are defined here.
        presets = {
            "single_sample": {
                "num_autoregressive_samples": 8,
                "diffusion_iterations": 10,
                "sampler": "ddim",
            },
            "ultra_fast": {
                "num_autoregressive_samples": 16,
                "diffusion_iterations": 10,
                "sampler": "ddim",
            },
            "ultra_fast_old": {
                "num_autoregressive_samples": 16,
                "diffusion_iterations": 30,
                "cond_free": False,
            },
            "very_fast": {
                "num_autoregressive_samples": 32,
                "diffusion_iterations": 30,
                "sampler": "dpm++2m",
            },
            "fast": {
                "num_autoregressive_samples": 5,
                "diffusion_iterations": 50,
                "sampler": "ddim",
            },
            "fast_old": {"num_autoregressive_samples": 96, "diffusion_iterations": 80},
            "standard": {
                "num_autoregressive_samples": 5,
                "diffusion_iterations": 200,
            },
            "high_quality": {
                "num_autoregressive_samples": 256,
                "diffusion_iterations": 400,
            },
        }
        if "preset" in kwargs:
            settings.update(presets[kwargs["preset"]])
            kwargs.pop("preset")
        settings.update(kwargs)  # allow overriding of preset settings with kwargs
        return self.inference(text, **settings)

    def inference(
        self,
        text,
        voice_samples=None,
        conditioning_latents=None,
        k=1,
        verbose=True,
        use_deterministic_seed=None,
        return_deterministic_state=False,
        latent_averaging_mode=0,
        # autoregressive generation parameters follow
        num_autoregressive_samples=16,
        temperature=0.8,
        length_penalty=1,
        repetition_penalty=2.0,
        top_p=0.8,
        max_mel_tokens=500,
        # diffusion generation parameters follow
        diffusion_iterations=100,
        cond_free=True,
        cond_free_k=2,
        diffusion_temperature=1.0,
        sampler="ddim",
        half=True,
        original_tortoise=False,
        **hf_generate_kwargs,
    ):
        """
        This function produces an audio clip of the given text being spoken with the given reference voice.

        Args:
            text: (str) Text to be spoken.
            voice_samples: (List[Tuple[torch.Tensor]]) List of an arbitrary number of reference clips, which should be
                tuple-pairs of torch tensors containing arbitrary kHz waveform data.
            conditioning_latents: (Tuple[autoregressive_conditioning_latent, diffusion_conditioning_latent]) A tuple of
                (autoregressive_conditioning_latent, diffusion_conditioning_latent), which can be provided in lieu
                of voice_samples. This is ignored unless `voice_samples=None`. Conditioning latents can be retrieved
                via `get_conditioning_latents()`.
            k: (int) The number of returned clips. The most likely (as determined by Tortoise's CLVP model) clips are returned.
            latent_averaging_mode: (int) 0/1/2 for following modes:
                0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples
                1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks
                2 - latents will be generated using (almost) entire voice samples, averaged per voice sample
            verbose: (bool) Whether or not to print log messages indicating the progress of creating a clip. Default=true.
            num_autoregressive_samples: (int) Number of samples taken from the autoregressive model, all of which are filtered using CLVP.
                As Tortoise is a probabilistic model, more samples means a higher probability of creating something "great".
            temperature: (float) The softmax temperature of the autoregressive model.
            length_penalty: (float) A length penalty applied to the autoregressive decoder. Higher settings cause the model
                to produce more terse outputs.
            repetition_penalty: (float) A penalty that prevents the autoregressive decoder from repeating itself during decoding.
                Can be used to reduce the incidence of long silences or "uhhhhhhs", etc.
            top_p: (float) P value used in nucleus sampling. (0,1]. Lower values mean the decoder produces more "likely" (aka boring) outputs.
            max_mel_tokens: (int) Restricts the output length. (0,600] integer. Each unit is 1/20 of a second.
            typical_sampling: (bool) Turns typical sampling on or off. This sampling mode is discussed in this paper:
                https://arxiv.org/abs/2202.00666. I was interested in the premise, but the results were not as good as
                I was hoping. This is off by default, but could use some tuning.
            typical_mass: (float) The typical_mass parameter from the typical_sampling algorithm.
            diffusion_iterations: (int) Number of diffusion steps to perform. [0,4000]. More steps means the network has
                more chances to iteratively refine the output, which should theoretically mean a higher quality output.
                Generally a value above 250 is not noticeably better, however.
            cond_free: (bool) Whether or not to perform conditioning-free diffusion. Conditioning-free diffusion performs two
                forward passes for each diffusion step: one with the outputs of the autoregressive model and one with no
                conditioning priors. The output of the two is blended according to the cond_free_k value below.
                Conditioning-free diffusion is the real deal, and dramatically improves realism.
            cond_free_k: (float) Knob that determines how to balance the conditioning free signal with the conditioning-present
                signal. [0,inf]. As cond_free_k increases, the output becomes dominated by the conditioning-free signal.
            diffusion_temperature: (float) Controls the variance of the noise fed into the diffusion model. [0,1]. Values at 0
                are the "mean" prediction of the diffusion network and will sound bland and smeared.
            hf_generate_kwargs: (**kwargs) The huggingface Transformers generate API is used for the autoregressive
                transformer. Extra keyword args fed to this function get forwarded directly to that API. Documentation
                here: https://huggingface.co/docs/transformers/internal/generation_utils

        Returns:
            Generated audio clip(s) as a torch tensor. Shape 1,S if k=1 else, (k,1,S) where S is the sample length.
            Sample rate is 24kHz.
        """
        deterministic_seed = deterministic_state(seed=use_deterministic_seed)

        text_tokens = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).to(self.device)
        text_tokens = F.pad(text_tokens, (0, 1))  # This may not be necessary.
        assert (
            text_tokens.shape[-1] < 400
        ), "Too much text provided. Break the text up into separate segments and re-try inference."
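        # Conditioning comes from reference clips if given, else from precomputed
        # latents, else from the random latent generators (in that order).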
        if voice_samples is not None:
            (
                auto_conditioning,
                diffusion_conditioning,
                _,
                _,
            ) = self.get_conditioning_latents(
                voice_samples,
                return_mels=True,
                latent_averaging_mode=latent_averaging_mode,
                original_tortoise=original_tortoise,
            )
        elif conditioning_latents is not None:
            auto_conditioning, diffusion_conditioning = conditioning_latents
        else:
            (
                auto_conditioning,
                diffusion_conditioning,
            ) = self.get_random_conditioning_latents()
        auto_conditioning = auto_conditioning.to(self.device)
        diffusion_conditioning = diffusion_conditioning.to(self.device)

        diffuser = load_discrete_vocoder_diffuser(
            desired_diffusion_steps=diffusion_iterations, cond_free=cond_free, cond_free_k=cond_free_k, sampler=sampler
        )

        # in the case of single_sample,
        orig_batch_size = self.autoregressive_batch_size
        while num_autoregressive_samples % self.autoregressive_batch_size:
            self.autoregressive_batch_size //= 2
        with torch.no_grad():
            samples = []
            num_batches = num_autoregressive_samples // self.autoregressive_batch_size
            stop_mel_token = self.autoregressive.stop_mel_token
            calm_token = (
                83  # This is the token for coding silence, which is fixed in place with "fix_autoregressive_output"
            )
            self.autoregressive = self.autoregressive.to(self.device)
            if verbose:
                print("Generating autoregressive samples..")
            with self.temporary_cuda(self.autoregressive) as autoregressive, torch.autocast(
                device_type="cuda", dtype=torch.float16, enabled=half
            ):
                for b in tqdm(range(num_batches), disable=not verbose):
                    codes = autoregressive.inference_speech(
                        auto_conditioning,
                        text_tokens,
                        do_sample=True,
                        top_p=top_p,
                        temperature=temperature,
                        num_return_sequences=self.autoregressive_batch_size,
                        length_penalty=length_penalty,
                        repetition_penalty=repetition_penalty,
                        max_generate_length=max_mel_tokens,
                        **hf_generate_kwargs,
                    )
                    padding_needed = max_mel_tokens - codes.shape[1]
                    codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                    samples.append(codes)
            self.autoregressive_batch_size = orig_batch_size  # in the case of single_sample

            clip_results = []
            with self.temporary_cuda(self.clvp) as clvp, torch.autocast(
                device_type="cuda", dtype=torch.float16, enabled=half
            ):
                for batch in tqdm(samples, disable=not verbose):
                    for i in range(batch.shape[0]):
                        batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
                    clvp_res = clvp(
                        text_tokens.repeat(batch.shape[0], 1),
                        batch,
                        return_loss=False,
                    )
                    clip_results.append(clvp_res)

                clip_results = torch.cat(clip_results, dim=0)
                samples = torch.cat(samples, dim=0)
                best_results = samples[torch.topk(clip_results, k=k).indices]
            del samples

            # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
            # inputs. Re-produce those for the top results. This could be made more efficient by storing all of these
            # results, but will increase memory usage.
            with self.temporary_cuda(self.autoregressive) as autoregressive:
                best_latents = autoregressive(
                    auto_conditioning.repeat(k, 1),
                    text_tokens.repeat(k, 1),
                    torch.tensor([text_tokens.shape[-1]], device=text_tokens.device),
                    best_results,
                    torch.tensor(
                        [best_results.shape[-1] * self.autoregressive.mel_length_compression],
                        device=text_tokens.device,
                    ),
                    return_latent=True,
                    clip_inputs=False,
                )
            del auto_conditioning

            if verbose:
                print("Transforming autoregressive outputs into audio..")
            wav_candidates = []
            for b in range(best_results.shape[0]):
                codes = best_results[b].unsqueeze(0)
                latents = best_latents[b].unsqueeze(0)

                # Find the first occurrence of the "calm" token and trim the codes to that.
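                # (Trimming the latents, not the codes, is what matters here: the
                # diffusion model consumes `latents` below.)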
                ctokens = 0
                for code in range(codes.shape[-1]):
                    if codes[0, code] == calm_token:
                        ctokens += 1
                    else:
                        ctokens = 0
                    if ctokens > 8:  # 8 tokens gives the diffusion model some "breathing room" to terminate speech.
                        latents = latents[:, :code]
                        break
                with self.temporary_cuda(self.diffusion) as diffusion:
                    mel = do_spectrogram_diffusion(
                        diffusion,
                        diffuser,
                        latents,
                        diffusion_conditioning,
                        temperature=diffusion_temperature,
                        verbose=verbose,
                    )
                with self.temporary_cuda(self.vocoder) as vocoder:
                    wav = vocoder.inference(mel)
                    wav_candidates.append(wav.cpu())

            def potentially_redact(clip, text):
                if self.enable_redaction:
                    return self.aligner.redact(clip.squeeze(1), text).unsqueeze(1)
                return clip

            wav_candidates = [potentially_redact(wav_candidate, text) for wav_candidate in wav_candidates]

            if len(wav_candidates) > 1:
                res = wav_candidates
            else:
                res = wav_candidates[0]

            return_dict = {
                "wav": res,
                "deterministic_seed": None,
                "text": None,
                "voice_samples": None,
                "conditioning_latents": None,
            }
            if return_deterministic_state:
                return_dict = {
                    "wav": res,
                    "deterministic_seed": deterministic_seed,
                    "text": text,
                    "voice_samples": voice_samples,
                    "conditioning_latents": conditioning_latents,
                }
            return return_dict

    def forward(self):
        raise NotImplementedError("Tortoise Training is not implemented")

    def eval_step(self):
        raise NotImplementedError("Tortoise Training is not implemented")

    @staticmethod
    def init_from_config(config: "TortoiseConfig", **kwargs):  # pylint: disable=unused-argument
        return Tortoise(config)

    def load_checkpoint(
        self,
        config,
        checkpoint_dir,
        ar_checkpoint_path=None,
        diff_checkpoint_path=None,
        clvp_checkpoint_path=None,
        vocoder_checkpoint_path=None,
        eval=False,
        strict=True,
        **kwargs,
    ):  # pylint: disable=unused-argument, redefined-builtin
        """Load model checkpoints from a directory.

        This model uses multiple checkpoint files, and it expects all of them to be under the given
        `checkpoint_dir` with the right names. If `eval` is True, set the model to eval mode.

        Args:
            config (TortoiseConfig): The model config.
            checkpoint_dir (str): The directory where the checkpoints are stored.
            ar_checkpoint_path (str, optional): The path to the autoregressive checkpoint. Defaults to None.
            diff_checkpoint_path (str, optional): The path to the diffusion checkpoint. Defaults to None.
            clvp_checkpoint_path (str, optional): The path to the CLVP checkpoint. Defaults to None.
            vocoder_checkpoint_path (str, optional): The path to the vocoder checkpoint. Defaults to None.
            eval (bool, optional): Whether to set the model to eval mode. Defaults to False.
            strict (bool, optional): Whether to load the model strictly. Defaults to True.
""" if self.models_dir is None: self.models_dir = checkpoint_dir ar_path = ar_checkpoint_path or os.path.join(checkpoint_dir, "autoregressive.pth") diff_path = diff_checkpoint_path or os.path.join(checkpoint_dir, "diffusion_decoder.pth") clvp_path = clvp_checkpoint_path or os.path.join(checkpoint_dir, "clvp2.pth") vocoder_checkpoint_path = vocoder_checkpoint_path or os.path.join(checkpoint_dir, "vocoder.pth") self.mel_norm_path = os.path.join(checkpoint_dir, "mel_norms.pth") if os.path.exists(ar_path): # remove keys from the checkpoint that are not in the model checkpoint = torch.load(ar_path, map_location=torch.device("cpu")) # strict set False # due to removed `bias` and `masked_bias` changes in Transformers self.autoregressive.load_state_dict(checkpoint, strict=False) if os.path.exists(diff_path): self.diffusion.load_state_dict(torch.load(diff_path), strict=strict) if os.path.exists(clvp_path): self.clvp.load_state_dict(torch.load(clvp_path), strict=strict) if os.path.exists(vocoder_checkpoint_path): self.vocoder.load_state_dict( config.model_args.vocoder.value.optionally_index( torch.load( vocoder_checkpoint_path, map_location=torch.device("cpu"), ) ) ) if eval: self.autoregressive.post_init_gpt2_config(self.args.kv_cache) self.autoregressive.eval() self.diffusion.eval() self.clvp.eval() self.vocoder.eval() def train_step(self): raise NotImplementedError("Tortoise Training is not implemented")
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-electronjs_v10.0_multiarchpkg-win-amd64-opt.yml
build:
  template_file: test-win-opt-base.tyml
  dependencies:
    - "node-package-cpu"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
    ${system.sox_win} && ${nodejs.win.prep_12}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 10.0.0 16k"
  metadata:
    name: "DeepSpeech Windows AMD64 CPU ElectronJS MultiArch Package v10.0 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on ElectronJS MultiArch Package v10.0, CPU only, optimized version"
0
coqui_public_repos/STT/native_client/dotnet/nupkg
coqui_public_repos/STT/native_client/dotnet/nupkg/build/STT.targets
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup>
    <NativeLibs Include="$(MSBuildThisFileDirectory)\*.so" />
    <None Include="@(NativeLibs)">
      <Link>%(FileName)%(Extension)</Link>
      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
    </None>
  </ItemGroup>
</Project>
0