repo_id: string (length 18–103)
file_path: string (length 30–136)
content: string (length 2–3.36M)
__index_level_0__: int64 (values 0–0)
coqui_public_repos/snakepit/src
coqui_public_repos/snakepit/src/models/State-model.js
const Sequelize = require('sequelize') const sequelize = require('./db.js') var State = sequelize.define('state', { id: { type: Sequelize.INTEGER, autoIncrement: true, primaryKey: true }, state: { type: Sequelize.INTEGER, allowNull: false }, since: { type: Sequelize.DATE, allowNull: false }, reason: { type: Sequelize.STRING, allowNull: true } }) module.exports = State
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/Makefile.am
if HAVE_COMPRESS compress_include_headers = fst/extensions/compress/compress.h \ fst/extensions/compress/compress-script.h fst/extensions/compress/gzfile.h \ fst/extensions/compress/elias.h fst/extensions/compress/randmod.h endif if HAVE_FAR far_include_headers = fst/extensions/far/compile-strings.h \ fst/extensions/far/create.h fst/extensions/far/equal.h \ fst/extensions/far/extract.h fst/extensions/far/far.h \ fst/extensions/far/far-class.h fst/extensions/far/farlib.h \ fst/extensions/far/farscript.h fst/extensions/far/getters.h \ fst/extensions/far/info.h fst/extensions/far/isomorphic.h \ fst/extensions/far/print-strings.h fst/extensions/far/script-impl.h \ fst/extensions/far/stlist.h fst/extensions/far/sttable.h endif if HAVE_LINEAR linear_include_headers = fst/extensions/linear/linear-fst-data-builder.h \ fst/extensions/linear/linear-fst-data.h fst/extensions/linear/linear-fst.h \ fst/extensions/linear/linearscript.h fst/extensions/linear/loglinear-apply.h \ fst/extensions/linear/trie.h endif if HAVE_MPDT mpdt_include_headers = fst/extensions/mpdt/compose.h \ fst/extensions/mpdt/expand.h fst/extensions/mpdt/info.h \ fst/extensions/mpdt/mpdt.h fst/extensions/mpdt/mpdtlib.h \ fst/extensions/mpdt/mpdtscript.h fst/extensions/mpdt/read_write_utils.h \ fst/extensions/mpdt/reverse.h endif if HAVE_NGRAM ngram_include_headers = fst/extensions/ngram/bitmap-index.h \ fst/extensions/ngram/ngram-fst.h fst/extensions/ngram/nthbit.h endif if HAVE_PDT pdt_include_headers = fst/extensions/pdt/collection.h \ fst/extensions/pdt/compose.h fst/extensions/pdt/expand.h \ fst/extensions/pdt/getters.h fst/extensions/pdt/info.h \ fst/extensions/pdt/paren.h fst/extensions/pdt/pdt.h \ fst/extensions/pdt/pdtlib.h fst/extensions/pdt/pdtscript.h \ fst/extensions/pdt/replace.h fst/extensions/pdt/reverse.h \ fst/extensions/pdt/shortest-path.h endif if HAVE_SPECIAL special_include_headers = fst/extensions/special/phi-fst.h \ fst/extensions/special/rho-fst.h fst/extensions/special/sigma-fst.h endif if HAVE_GRM far_include_headers = fst/extensions/far/compile-strings.h \ fst/extensions/far/create.h fst/extensions/far/equal.h \ fst/extensions/far/extract.h fst/extensions/far/far.h \ fst/extensions/far/far-class.h fst/extensions/far/farlib.h \ fst/extensions/far/farscript.h fst/extensions/far/getters.h \ fst/extensions/far/info.h fst/extensions/far/isomorphic.h \ fst/extensions/far/print-strings.h fst/extensions/far/script-impl.h \ fst/extensions/far/stlist.h fst/extensions/far/sttable.h mpdt_include_headers = fst/extensions/mpdt/compose.h \ fst/extensions/mpdt/expand.h fst/extensions/mpdt/info.h \ fst/extensions/mpdt/mpdt.h fst/extensions/mpdt/mpdtlib.h \ fst/extensions/mpdt/mpdtscript.h fst/extensions/mpdt/read_write_utils.h \ fst/extensions/mpdt/reverse.h pdt_include_headers = fst/extensions/pdt/collection.h \ fst/extensions/pdt/compose.h fst/extensions/pdt/expand.h \ fst/extensions/pdt/getters.h fst/extensions/pdt/info.h \ fst/extensions/pdt/paren.h fst/extensions/pdt/pdt.h \ fst/extensions/pdt/pdtlib.h fst/extensions/pdt/pdtscript.h \ fst/extensions/pdt/replace.h fst/extensions/pdt/reverse.h \ fst/extensions/pdt/shortest-path.h endif script_include_headers = fst/script/arc-class.h \ fst/script/arciterator-class.h fst/script/arcsort.h \ fst/script/arg-packs.h fst/script/closure.h fst/script/compile-impl.h \ fst/script/compile.h fst/script/compose.h fst/script/concat.h \ fst/script/connect.h fst/script/convert.h fst/script/decode.h \ fst/script/determinize.h fst/script/difference.h fst/script/disambiguate.h \ 
fst/script/draw-impl.h fst/script/draw.h fst/script/encode.h \ fst/script/encodemapper-class.h fst/script/epsnormalize.h fst/script/equal.h \ fst/script/equivalent.h fst/script/fst-class.h fst/script/fstscript.h \ fst/script/getters.h fst/script/info-impl.h fst/script/info.h \ fst/script/intersect.h fst/script/invert.h fst/script/isomorphic.h \ fst/script/map.h fst/script/minimize.h fst/script/print-impl.h \ fst/script/print.h fst/script/project.h fst/script/prune.h \ fst/script/push.h fst/script/randequivalent.h fst/script/randgen.h \ fst/script/register.h fst/script/relabel.h fst/script/replace.h \ fst/script/reverse.h fst/script/reweight.h fst/script/rmepsilon.h \ fst/script/script-impl.h fst/script/shortest-distance.h \ fst/script/shortest-path.h fst/script/stateiterator-class.h \ fst/script/synchronize.h fst/script/text-io.h fst/script/topsort.h \ fst/script/union.h fst/script/weight-class.h fst/script/fstscript-decl.h \ fst/script/verify.h test_include_headers = fst/test/algo_test.h fst/test/fst_test.h \ fst/test/rand-fst.h fst/test/weight-tester.h nobase_include_HEADERS = fst/accumulator.h fst/add-on.h fst/arc-arena.h \ fst/arc-map.h fst/arc.h fst/arcfilter.h fst/arcsort.h fst/bi-table.h \ fst/cache.h fst/closure.h fst/compact-fst.h fst/compat.h fst/complement.h \ fst/compose-filter.h fst/compose.h fst/concat.h fst/config.h fst/connect.h \ fst/const-fst.h fst/determinize.h fst/dfs-visit.h fst/difference.h \ fst/disambiguate.h fst/edit-fst.h fst/encode.h fst/epsnormalize.h fst/equal.h \ fst/equivalent.h fst/expanded-fst.h fst/expectation-weight.h \ fst/factor-weight.h fst/filter-state.h fst/flags.h fst/float-weight.h \ fst/fst-decl.h fst/fst.h fst/fstlib.h fst/generic-register.h fst/heap.h \ fst/icu.h fst/intersect.h fst/interval-set.h fst/invert.h fst/isomorphic.h \ fst/label-reachable.h fst/lexicographic-weight.h fst/lock.h fst/log.h \ fst/lookahead-filter.h fst/lookahead-matcher.h fst/map.h fst/mapped-file.h \ fst/matcher-fst.h fst/matcher.h fst/memory.h fst/minimize.h fst/mutable-fst.h \ fst/pair-weight.h fst/partition.h fst/power-weight.h fst/product-weight.h \ fst/project.h fst/properties.h fst/prune.h fst/push.h fst/queue.h \ fst/randequivalent.h fst/randgen.h fst/rational.h fst/register.h \ fst/relabel.h fst/replace-util.h fst/replace.h fst/reverse.h fst/reweight.h \ fst/rmepsilon.h fst/rmfinalepsilon.h fst/set-weight.h fst/shortest-distance.h \ fst/shortest-path.h fst/signed-log-weight.h fst/sparse-power-weight.h \ fst/sparse-tuple-weight.h fst/state-map.h fst/state-reachable.h \ fst/state-table.h fst/statesort.h fst/string-weight.h fst/string.h \ fst/symbol-table-ops.h fst/symbol-table.h fst/synchronize.h \ fst/test-properties.h fst/topsort.h fst/tuple-weight.h fst/types.h \ fst/union-find.h fst/union-weight.h fst/union.h fst/util.h fst/vector-fst.h \ fst/verify.h fst/visit.h fst/weight.h \ $(compress_include_headers) \ $(far_include_headers) \ $(linear_include_headers) \ $(mpdt_include_headers) \ $(ngram_include_headers) \ $(pdt_include_headers) \ $(script_include_headers) \ $(special_include_headers) \ $(test_include_headers)
0
coqui_public_repos/STT/native_client/dotnet/STTWPF
coqui_public_repos/STT/native_client/dotnet/STTWPF/Properties/Settings.Designer.cs
//------------------------------------------------------------------------------ // <auto-generated> // This code was generated by a tool. // Runtime Version:4.0.30319.42000 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. // </auto-generated> //------------------------------------------------------------------------------ namespace STT.WPF.Properties { [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "17.2.0.0")] internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase { private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings()))); public static Settings Default { get { return defaultInstance; } } } }
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/android-apk-build.sh
#!/bin/bash set -xe arm_flavor=$1 source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh do_deepspeech_java_apk_build
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/far/strings.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <cmath> #include <string> #include <fst/flags.h> #include <fst/extensions/far/compile-strings.h> #include <fstream> DEFINE_string(far_field_separator, "\t", "Set of characters used as a separator between printed fields"); namespace fst { // Computes the minimal length required to encode each line number as a decimal // number. int KeySize(const char *filename) { std::ifstream istrm(filename); istrm.seekg(0); string s; int nline = 0; while (getline(istrm, s)) ++nline; istrm.seekg(0); return nline ? ceil(log10(nline + 1)) : 1; } } // namespace fst
0
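The `KeySize` helper in `strings.cc` above computes how many decimal digits are needed to number every line of a file, which the FAR tools use to zero-pad generated keys. Below is a minimal standalone sketch of the same formula; it does not call OpenFst, and `KeyWidth` plus the sample counts are illustrative only:

```cpp
#include <cmath>
#include <iostream>

// Width needed to print keys 1..nline in decimal, matching the logic of
// fst::KeySize: ceil(log10(nline + 1)), with a minimum width of 1.
int KeyWidth(int nline) {
  return nline ? static_cast<int>(std::ceil(std::log10(nline + 1))) : 1;
}

int main() {
  // e.g. a 1000-line file needs 4-digit keys: "0001" .. "1000".
  std::cout << KeyWidth(0) << " " << KeyWidth(9) << " "
            << KeyWidth(10) << " " << KeyWidth(1000) << "\n";  // 1 1 2 4
  return 0;
}
```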
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstrelabel.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/flags.h> DEFINE_string(isymbols, "", "Input label symbol table"); DEFINE_string(osymbols, "", "Output label symbol table"); DEFINE_string(relabel_isymbols, "", "Input symbol set to relabel to"); DEFINE_string(relabel_osymbols, "", "Output symbol set to relabel to"); DEFINE_string(relabel_ipairs, "", "Input relabel pairs (numeric)"); DEFINE_string(relabel_opairs, "", "Output relabel pairs (numeric)"); DEFINE_string(unknown_isymbol, "", "Input symbol to use to relabel OOVs (default: OOVs are errors)"); DEFINE_string(unknown_osymbol, "", "Output symbol to use to relabel OOVs (default: OOVs are errors)"); DEFINE_bool(allow_negative_labels, false, "Allow negative labels (not recommended; may cause conflicts)"); int fstrelabel_main(int argc, char **argv); int main(int argc, char **argv) { return fstrelabel_main(argc, argv); }
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/far/strings.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <cmath> #include <string> #include <fst/flags.h> #include <fst/extensions/far/compile-strings.h> #include <fstream> DEFINE_string(far_field_separator, "\t", "Set of characters used as a separator between printed fields"); namespace fst { // Computes the minimal length required to encode each line number as a decimal // number. int KeySize(const char *filename) { std::ifstream istrm(filename); istrm.seekg(0); string s; int nline = 0; while (getline(istrm, s)) ++nline; istrm.seekg(0); return nline ? ceil(log10(nline + 1)) : 1; } } // namespace fst
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/util/murmur_hash.hh
#ifndef UTIL_MURMUR_HASH_H #define UTIL_MURMUR_HASH_H #include <cstddef> #include <stdint.h> namespace util { // 64-bit machine version uint64_t MurmurHash64A(const void * key, std::size_t len, uint64_t seed = 0); // 32-bit machine version (not the same function as above) uint64_t MurmurHash64B(const void * key, std::size_t len, uint64_t seed = 0); // Use the version for this arch. Because the values differ across // architectures, really only use it for in-memory structures. uint64_t MurmurHashNative(const void * key, std::size_t len, uint64_t seed = 0); } // namespace util #endif // UTIL_MURMUR_HASH_H
0
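The declarations in `murmur_hash.hh` above are the whole public surface of this kenlm utility. A small usage sketch follows, assuming the header is included via its path relative to the kenlm source root and that the kenlm `util` library is linked in to provide the definitions:

```cpp
#include <cstdint>
#include <iostream>
#include <string>

#include "util/murmur_hash.hh"  // include path assumed relative to the kenlm root

int main() {
  const std::string key = "hello world";
  // MurmurHashNative picks the 64-bit or 32-bit variant for this machine,
  // so the value is only stable within one architecture: fine for in-memory
  // hash tables, not for on-disk formats.
  uint64_t h = util::MurmurHashNative(key.data(), key.size());
  // An explicit seed can be passed as the third argument (defaults to 0).
  uint64_t h_seeded = util::MurmurHash64A(key.data(), key.size(), 42);
  std::cout << std::hex << h << " " << h_seeded << "\n";
  return 0;
}
```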
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/utils/languages.py
import os from typing import Any, Dict, List import fsspec import numpy as np import torch from coqpit import Coqpit from TTS.config import check_config_and_model_args from TTS.tts.utils.managers import BaseIDManager class LanguageManager(BaseIDManager): """Manage the languages for multi-lingual 🐸TTS models. Load a datafile and parse the information in a way that can be queried by language. Args: language_ids_file_path (str, optional): Path to the metafile that maps language names to ids used by TTS models. Defaults to "". config (Coqpit, optional): Coqpit config that contains the language information in the datasets filed. Defaults to None. Examples: >>> manager = LanguageManager(language_ids_file_path=language_ids_file_path) >>> language_id_mapper = manager.language_ids """ def __init__( self, language_ids_file_path: str = "", config: Coqpit = None, ): super().__init__(id_file_path=language_ids_file_path) if config: self.set_language_ids_from_config(config) @property def num_languages(self) -> int: return len(list(self.name_to_id.keys())) @property def language_names(self) -> List: return list(self.name_to_id.keys()) @staticmethod def parse_language_ids_from_config(c: Coqpit) -> Dict: """Set language id from config. Args: c (Coqpit): Config Returns: Tuple[Dict, int]: Language ID mapping and the number of languages. """ languages = set({}) for dataset in c.datasets: if "language" in dataset: languages.add(dataset["language"]) else: raise ValueError(f"Dataset {dataset['name']} has no language specified.") return {name: i for i, name in enumerate(sorted(list(languages)))} def set_language_ids_from_config(self, c: Coqpit) -> None: """Set language IDs from config samples. Args: c (Coqpit): Config. """ self.name_to_id = self.parse_language_ids_from_config(c) @staticmethod def parse_ids_from_data(items: List, parse_key: str) -> Any: raise NotImplementedError def set_ids_from_data(self, items: List, parse_key: str) -> Any: raise NotImplementedError def save_ids_to_file(self, file_path: str) -> None: """Save language IDs to a json file. Args: file_path (str): Path to the output file. """ self._save_json(file_path, self.name_to_id) @staticmethod def init_from_config(config: Coqpit) -> "LanguageManager": """Initialize the language manager from a Coqpit config. Args: config (Coqpit): Coqpit config. """ language_manager = None if check_config_and_model_args(config, "use_language_embedding", True): if config.get("language_ids_file", None): language_manager = LanguageManager(language_ids_file_path=config.language_ids_file) language_manager = LanguageManager(config=config) return language_manager def _set_file_path(path): """Find the language_ids.json under the given path or the above it. 
Intended to band aid the different paths returned in restored and continued training.""" path_restore = os.path.join(os.path.dirname(path), "language_ids.json") path_continue = os.path.join(path, "language_ids.json") fs = fsspec.get_mapper(path).fs if fs.exists(path_restore): return path_restore if fs.exists(path_continue): return path_continue return None def get_language_balancer_weights(items: list): language_names = np.array([item["language"] for item in items]) unique_language_names = np.unique(language_names).tolist() language_ids = [unique_language_names.index(l) for l in language_names] language_count = np.array([len(np.where(language_names == l)[0]) for l in unique_language_names]) weight_language = 1.0 / language_count # get weight for each sample dataset_samples_weight = np.array([weight_language[l] for l in language_ids]) # normalize dataset_samples_weight = dataset_samples_weight / np.linalg.norm(dataset_samples_weight) return torch.from_numpy(dataset_samples_weight).float()
0
coqui_public_repos/TTS/TTS/vocoder
coqui_public_repos/TTS/TTS/vocoder/configs/fullband_melgan_config.py
from dataclasses import dataclass, field from .shared_configs import BaseGANVocoderConfig @dataclass class FullbandMelganConfig(BaseGANVocoderConfig): """Defines parameters for FullBand MelGAN vocoder. Example: >>> from TTS.vocoder.configs import FullbandMelganConfig >>> config = FullbandMelganConfig() Args: model (str): Model name used for selecting the right model at initialization. Defaults to `fullband_melgan`. discriminator_model (str): One of the discriminators from `TTS.vocoder.models.*_discriminator`. Defaults to `melgan_multiscale_discriminator`. discriminator_model_params (dict): The discriminator model parameters. Defaults to `{"base_channels": 16, "max_channels": 512, "downsample_factors": [4, 4, 4]}`. generator_model (str): One of the generators from `TTS.vocoder.models.*`. Every other non-GAN vocoder model is considered as a generator too. Defaults to `melgan_generator`. batch_size (int): Batch size used at training. Larger values use more memory. Defaults to 16. seq_len (int): Audio segment length used at training. Larger values use more memory. Defaults to 8192. pad_short (int): Additional padding applied to the audio samples shorter than `seq_len`. Defaults to 2000. use_noise_augment (bool): enable / disable random noise added to the input waveform. The noise is added after computing the features. Defaults to True. use_cache (bool): enable / disable in memory caching of the computed features. It can cause OOM error if the system RAM is not large enough. Defaults to True. use_stft_loss (bool): enable / disable use of STFT loss originally used by ParallelWaveGAN model. Defaults to True. use_subband_stft_loss (bool): enable / disable use of subband loss computation originally used by MultiBandMelgan model. Defaults to False. use_mse_gan_loss (bool): enable / disable using Mean Square Error GAN loss. Defaults to True. use_hinge_gan_loss (bool): enable / disable using Hinge GAN loss. You should choose either Hinge or MSE loss for training GAN models. Defaults to False. use_feat_match_loss (bool): enable / disable using Feature Matching loss originally used by MelGAN model. Defaults to True. use_l1_spec_loss (bool): enable / disable using L1 spectrogram loss originally used by HifiGAN model. Defaults to False. stft_loss_params (dict): STFT loss parameters. Defaults to `{"n_ffts": [1024, 2048, 512], "hop_lengths": [120, 240, 50], "win_lengths": [600, 1200, 240]}`. stft_loss_weight (float): STFT loss weight that multiplies the computed loss before summing up the total model loss. Defaults to 0.5. subband_stft_loss_weight (float): Subband STFT loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. mse_G_loss_weight (float): MSE generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 2.5. hinge_G_loss_weight (float): Hinge generator loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. feat_match_loss_weight (float): Feature matching loss weight that multiplies the computed loss before summing up the total loss. Defaults to 108. l1_spec_loss_weight (float): L1 spectrogram loss weight that multiplies the computed loss before summing up the total loss. Defaults to 0. 
""" model: str = "fullband_melgan" # Model specific params discriminator_model: str = "melgan_multiscale_discriminator" discriminator_model_params: dict = field( default_factory=lambda: {"base_channels": 16, "max_channels": 512, "downsample_factors": [4, 4, 4]} ) generator_model: str = "melgan_generator" generator_model_params: dict = field( default_factory=lambda: {"upsample_factors": [8, 8, 2, 2], "num_res_blocks": 4} ) # Training - overrides batch_size: int = 16 seq_len: int = 8192 pad_short: int = 2000 use_noise_augment: bool = True use_cache: bool = True # LOSS PARAMETERS - overrides use_stft_loss: bool = True use_subband_stft_loss: bool = False use_mse_gan_loss: bool = True use_hinge_gan_loss: bool = False use_feat_match_loss: bool = True # requires MelGAN Discriminators (MelGAN and HifiGAN) use_l1_spec_loss: bool = False stft_loss_params: dict = field( default_factory=lambda: { "n_ffts": [1024, 2048, 512], "hop_lengths": [120, 240, 50], "win_lengths": [600, 1200, 240], } ) # loss weights - overrides stft_loss_weight: float = 0.5 subband_stft_loss_weight: float = 0 mse_G_loss_weight: float = 2.5 hinge_G_loss_weight: float = 0 feat_match_loss_weight: float = 108 l1_spec_loss_weight: float = 0.0
0
coqui_public_repos/STT-models/english/coqui
coqui_public_repos/STT-models/english/coqui/v1.0.0-checkpoints/MODEL_CARD.md
# Model card for English STT v1.0.0 Jump to section: - [Model details](#model-details) - [Intended use](#intended-use) - [Performance Factors](#performance-factors) - [Metrics](#metrics) - [Training data](#training-data) - [Evaluation data](#evaluation-data) - [Ethical considerations](#ethical-considerations) - [Caveats and recommendations](#caveats-and-recommendations) ## Model details - Person or organization developing model: Maintained by [Coqui](https://coqui.ai/). - Model language: English / English / `en` - Model date: October 3, 2021 - Model type: `Speech-to-Text` - Model version: `v1.0.0` - Compatible with 🐸 STT version: `v1.0.0` - License: Apache 2.0 - Citation details: `@techreport{english-stt, author = {Coqui}, title = {English STT v1.0.0}, institution = {Coqui}, address = {\url{https://coqui.ai/models}} year = {2021}, month = {October}, number = {STT-EN-1.0.0} }` - Where to send questions or comments about the model: You can leave an issue on [`STT` issues](https://github.com/coqui-ai/STT/issues), open a new discussion on [`STT` discussions](https://github.com/coqui-ai/STT/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/). ## Intended use Speech-to-Text for the [English Language](https://en.wikipedia.org/wiki/English_language) on 16kHz, mono-channel audio. ## Performance Factors Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). ## Metrics STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk. #### Transcription Accuracy The Librispeech test sets are standard, but in order to create a more rigorous Common Voice test set, we made custom splits. The CV {train/dev/test} sets were allowed to have repeating sentences, with the requirement that any given sentence could not occur in more than one split. Without any language model: - Librispeech clean: WER: 7.7\%, CER: 2.3\% - Librispeech other: WER: 22.0\%, CER: 8.2\% - Common Voice 7.0 (Coqui custom test): WER: 55.7\%, CER: 24.3\% Compared to previous v0.9.3 STT (for both Coqui and Mozilla DeepSpeech): - Librispeech clean: WER: 14.5\%, CER: 4.5\% - Librispeech other: WER: 32.4\%, CER: 12.7\% - Common Voice 7.0 (Coqui custom test): WER: 63.8\%, CER: 28.9\% #### Model Size For STT, you always must deploy an acoustic model, and it is often the case you also will want to deploy an application-specific language model. The acoustic model here is an un-formatted, TensorFlow checkpoint representation, which is most useful for fine-tuning to custom datasets. |Model type|Vocabulary|Filename|Size| ----------------|-----|----------------|-----| |Acoustic model Checkpoints | open | `best_dev-3663881.*` | 677M| ### Approaches to uncertainty and variability Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio. ## Training data This model was trained on the following corpora: Common Voice 7.0 English (custom Coqui train/dev/test splits), LibriSpeech, and Multilingual Librispeech. In total approximately ~47,000 hours of data. ## Evaluation data The validation ("dev") sets came from CV, Librispeech, and MLS. Testing accuracy is reported for MLS and Librispeech. 
## Ethical considerations Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use. ### Demographic Bias You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue. ### Surveillance Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech. ## Caveats and recommendations Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/tensorflow/mfcc.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Basic class for computing MFCCs from spectrogram slices. #ifndef TENSORFLOW_CORE_KERNELS_MFCC_H_ #define TENSORFLOW_CORE_KERNELS_MFCC_H_ #include <vector> #include "mfcc_dct.h" #include "mfcc_mel_filterbank.h" #define TF_DISALLOW_COPY_AND_ASSIGN(TypeName) \ TypeName(const TypeName&) = delete; \ void operator=(const TypeName&) = delete namespace tensorflow { class Mfcc { public: Mfcc(); bool Initialize(int input_length, double input_sample_rate); // Input is a single squared-magnitude spectrogram frame. The input spectrum // is converted to linear magnitude and weighted into bands using a // triangular mel filterbank, and a discrete cosine transform (DCT) of the // values is taken. Output is populated with the lowest dct_coefficient_count // of these values. void Compute(const std::vector<double>& spectrogram_frame, std::vector<double>* output) const; void set_upper_frequency_limit(double upper_frequency_limit) { assert(!initialized_); // "Set frequency limits before calling Initialize." upper_frequency_limit_ = upper_frequency_limit; } void set_lower_frequency_limit(double lower_frequency_limit) { assert(!initialized_); // "Set frequency limits before calling Initialize."; lower_frequency_limit_ = lower_frequency_limit; } void set_filterbank_channel_count(int filterbank_channel_count) { assert(!initialized_); // "Set channel count before calling Initialize."; filterbank_channel_count_ = filterbank_channel_count; } void set_dct_coefficient_count(int dct_coefficient_count) { assert(!initialized_); // "Set coefficient count before calling Initialize."; dct_coefficient_count_ = dct_coefficient_count; } private: MfccMelFilterbank mel_filterbank_; MfccDct dct_; bool initialized_; double lower_frequency_limit_; double upper_frequency_limit_; int filterbank_channel_count_; int dct_coefficient_count_; TF_DISALLOW_COPY_AND_ASSIGN(Mfcc); }; } // namespace tensorflow #endif // TENSORFLOW_CORE_KERNELS_MFCC_H_
0
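The comments in `mfcc.h` above describe the intended call sequence: configure the optional parameters, call `Initialize`, then call `Compute` once per spectrogram frame. A usage sketch under those assumptions follows; the frame size (257 bins), sample rate, and parameter values are illustrative, not required by the class:

```cpp
#include <iostream>
#include <vector>

#include "mfcc.h"  // the header shown above

int main() {
  tensorflow::Mfcc mfcc;
  // The asserts in the setters require configuring before Initialize().
  mfcc.set_filterbank_channel_count(40);
  mfcc.set_dct_coefficient_count(13);
  mfcc.set_lower_frequency_limit(20.0);
  mfcc.set_upper_frequency_limit(4000.0);

  // 257 bins corresponds to a 512-point FFT on 16 kHz audio; both numbers
  // are only an example input shape.
  const int kSpectrogramBins = 257;
  if (!mfcc.Initialize(kSpectrogramBins, 16000.0)) {
    std::cerr << "Mfcc::Initialize failed\n";
    return 1;
  }

  // One squared-magnitude spectrogram frame in, 13 cepstral coefficients out.
  std::vector<double> frame(kSpectrogramBins, 1.0);
  std::vector<double> coeffs;
  mfcc.Compute(frame, &coeffs);
  std::cout << "got " << coeffs.size() << " coefficients\n";
  return 0;
}
```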
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstrelabel.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/flags.h> DEFINE_string(isymbols, "", "Input label symbol table"); DEFINE_string(osymbols, "", "Output label symbol table"); DEFINE_string(relabel_isymbols, "", "Input symbol set to relabel to"); DEFINE_string(relabel_osymbols, "", "Output symbol set to relabel to"); DEFINE_string(relabel_ipairs, "", "Input relabel pairs (numeric)"); DEFINE_string(relabel_opairs, "", "Output relabel pairs (numeric)"); DEFINE_string(unknown_isymbol, "", "Input symbol to use to relabel OOVs (default: OOVs are errors)"); DEFINE_string(unknown_osymbol, "", "Output symbol to use to relabel OOVs (default: OOVs are errors)"); DEFINE_bool(allow_negative_labels, false, "Allow negative labels (not recommended; may cause conflicts)"); int fstrelabel_main(int argc, char **argv); int main(int argc, char **argv) { return fstrelabel_main(argc, argv); }
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/compact/compact16_weighted_string-fst.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/fst.h> #include <fst/compact-fst.h> namespace fst { static FstRegisterer< CompactWeightedStringFst<StdArc, uint16>> CompactWeightedStringFst_StdArc_uint16_registerer; static FstRegisterer< CompactWeightedStringFst<LogArc, uint16>> CompactWeightedStringFst_LogArc_uint16_registerer; } // namespace fst
0
coqui_public_repos/STT-examples/django_api_streaming/stt_app
coqui_public_repos/STT-examples/django_api_streaming/stt_app/stt/STT.py
import scipy.io.wavfile as wav from stt_app.apps import STT from stt_app.config import config from stt_app import logging audiolength = float(config.ConfigSTT().get_config("audiofilelength")) def stt(audioPath, from_websocket=False): try: logging.log("Inside stt function", "info") text = "" fs, audio = wav.read(audioPath) if fs == 16000: if from_websocket or check_audio_lenth(len(audio)): logging.log("Starting transcribing...", "info") text = STT.stt_model.stt(audio) logging.log("Audio transcribed.", "info") elif not from_websocket: text = "Audio should be less than " + str(audiolength) + " seconds." else: text = "Sample rate of submitted audio should be 16000 Hz." #print('after inference: %s' % text) except Exception as err: logging.log("exception occurred: {0}".format(err), "error") text = "Some error occurred while transcribing." return text def check_audio_lenth(len_audio): len_audio = len_audio / 16000 if len_audio > audiolength: return False else: return True
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/stateiterator-class.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/script-impl.h> #include <fst/script/stateiterator-class.h> namespace fst { namespace script { StateIteratorClass::StateIteratorClass(const FstClass &fst) : impl_(nullptr) { InitStateIteratorClassArgs args(fst, this); Apply<Operation<InitStateIteratorClassArgs>>("InitStateIteratorClass", fst.ArcType(), &args); } REGISTER_FST_OPERATION(InitStateIteratorClass, StdArc, InitStateIteratorClassArgs); REGISTER_FST_OPERATION(InitStateIteratorClass, LogArc, InitStateIteratorClassArgs); REGISTER_FST_OPERATION(InitStateIteratorClass, Log64Arc, InitStateIteratorClassArgs); } // namespace script } // namespace fst
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstisomorphic-main.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Two FSTS are isomorphic (equal up to state and arc re-ordering) iff their // exit status is zero. FSTs should be deterministic when viewed as unweighted // automata. #include <cstring> #include <memory> #include <string> #include <fst/flags.h> #include <fst/log.h> #include <fst/script/isomorphic.h> DECLARE_double(delta); int fstisomorphic_main(int argc, char **argv) { namespace s = fst::script; using fst::script::FstClass; string usage = "Two FSTs are isomorphic iff the exit status is zero.\n\n Usage: "; usage += argv[0]; usage += " in1.fst in2.fst\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc != 3) { ShowUsage(); return 1; } const string in1_name = strcmp(argv[1], "-") == 0 ? "" : argv[1]; const string in2_name = strcmp(argv[2], "-") == 0 ? "" : argv[2]; if (in1_name.empty() && in2_name.empty()) { LOG(ERROR) << argv[0] << ": Can't take both inputs from standard input"; return 1; } std::unique_ptr<FstClass> ifst1(FstClass::Read(in1_name)); if (!ifst1) return 1; std::unique_ptr<FstClass> ifst2(FstClass::Read(in2_name)); if (!ifst2) return 1; bool result = s::Isomorphic(*ifst1, *ifst2, FLAGS_delta); if (!result) VLOG(1) << "FSTs are not isomorphic"; return result ? 0 : 2; }
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/far/extract.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Extracts component FSTs from an finite-state archive. #ifndef FST_EXTENSIONS_FAR_EXTRACT_H_ #define FST_EXTENSIONS_FAR_EXTRACT_H_ #include <memory> #include <string> #include <vector> #include <fst/extensions/far/far.h> #include <fst/util.h> namespace fst { template <class Arc> inline void FarWriteFst(const Fst<Arc> *fst, string key, string *okey, int *nrep, int32_t generate_filenames, int i, const string &filename_prefix, const string &filename_suffix) { if (key == *okey) { ++*nrep; } else { *nrep = 0; } *okey = key; string ofilename; if (generate_filenames) { std::ostringstream tmp; tmp.width(generate_filenames); tmp.fill('0'); tmp << i; ofilename = tmp.str(); } else { if (*nrep > 0) { std::ostringstream tmp; tmp << '.' << nrep; key.append(tmp.str().data(), tmp.str().size()); } ofilename = key; } fst->Write(filename_prefix + ofilename + filename_suffix); } template <class Arc> void FarExtract(const std::vector<string> &ifilenames, int32_t generate_filenames, const string &keys, const string &key_separator, const string &range_delimiter, const string &filename_prefix, const string &filename_suffix) { std::unique_ptr<FarReader<Arc>> far_reader( FarReader<Arc>::Open(ifilenames)); if (!far_reader) return; string okey; int nrep = 0; std::vector<char *> key_vector; // User has specified a set of FSTs to extract, where some of these may in // fact be ranges. if (!keys.empty()) { auto *keys_cstr = new char[keys.size() + 1]; strcpy(keys_cstr, keys.c_str()); SplitString(keys_cstr, key_separator.c_str(), &key_vector, true); int i = 0; for (size_t k = 0; k < key_vector.size(); ++k, ++i) { string key = key_vector[k]; auto *key_cstr = new char[key.size() + 1]; strcpy(key_cstr, key.c_str()); std::vector<char *> range_vector; SplitString(key_cstr, range_delimiter.c_str(), &range_vector, false); if (range_vector.size() == 1) { // Not a range if (!far_reader->Find(key)) { LOG(ERROR) << "FarExtract: Cannot find key " << key; return; } const auto *fst = far_reader->GetFst(); FarWriteFst(fst, key, &okey, &nrep, generate_filenames, i, filename_prefix, filename_suffix); } else if (range_vector.size() == 2) { // A legal range string begin_key = range_vector[0]; string end_key = range_vector[1]; if (begin_key.empty() || end_key.empty()) { LOG(ERROR) << "FarExtract: Illegal range specification " << key; return; } if (!far_reader->Find(begin_key)) { LOG(ERROR) << "FarExtract: Cannot find key " << begin_key; return; } for (; !far_reader->Done(); far_reader->Next(), ++i) { const auto &ikey = far_reader->GetKey(); if (end_key < ikey) break; const auto *fst = far_reader->GetFst(); FarWriteFst(fst, ikey, &okey, &nrep, generate_filenames, i, filename_prefix, filename_suffix); } } else { LOG(ERROR) << "FarExtract: Illegal range specification " << key; return; } delete[] key_cstr; } delete[] keys_cstr; return; } // Nothing specified, so just extracts everything. for (size_t i = 1; !far_reader->Done(); far_reader->Next(), ++i) { const auto &key = far_reader->GetKey(); const auto *fst = far_reader->GetFst(); FarWriteFst(fst, key, &okey, &nrep, generate_filenames, i, filename_prefix, filename_suffix); } return; } } // namespace fst #endif // FST_EXTENSIONS_FAR_EXTRACT_H_
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/lib/symbol-table-ops.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // #include <fst/symbol-table-ops.h> #include <string> namespace fst { SymbolTable *MergeSymbolTable(const SymbolTable &left, const SymbolTable &right, bool *right_relabel_output) { // MergeSymbolTable detects several special cases. It will return a reference // copied version of SymbolTable of left or right if either symbol table is // a superset of the other. std::unique_ptr<SymbolTable> merged( new SymbolTable("merge_" + left.Name() + "_" + right.Name())); // Copies everything from the left symbol table. bool left_has_all = true; bool right_has_all = true; bool relabel = false; for (SymbolTableIterator liter(left); !liter.Done(); liter.Next()) { merged->AddSymbol(liter.Symbol(), liter.Value()); if (right_has_all) { int64 key = right.Find(liter.Symbol()); if (key == -1) { right_has_all = false; } else if (!relabel && key != liter.Value()) { relabel = true; } } } if (right_has_all) { if (right_relabel_output) *right_relabel_output = relabel; return right.Copy(); } // add all symbols we can from right symbol table std::vector<string> conflicts; for (SymbolTableIterator riter(right); !riter.Done(); riter.Next()) { int64 key = merged->Find(riter.Symbol()); if (key != -1) { // Symbol already exists, maybe with different value if (key != riter.Value()) relabel = true; continue; } // Symbol doesn't exist from left left_has_all = false; if (!merged->Find(riter.Value()).empty()) { // we can't add this where we want to, add it later, in order conflicts.push_back(riter.Symbol()); continue; } // there is a hole and we can add this symbol with its id merged->AddSymbol(riter.Symbol(), riter.Value()); } if (right_relabel_output) *right_relabel_output = relabel; if (left_has_all) return left.Copy(); // Add all symbols that conflicted, in order for (const auto &conflict : conflicts) merged->AddSymbol(conflict); return merged.release(); } SymbolTable *CompactSymbolTable(const SymbolTable &syms) { std::map<int64, string> sorted; SymbolTableIterator stiter(syms); for (; !stiter.Done(); stiter.Next()) { sorted[stiter.Value()] = stiter.Symbol(); } auto *compact = new SymbolTable(syms.Name() + "_compact"); int64 newkey = 0; for (const auto &kv : sorted) compact->AddSymbol(kv.second, newkey++); return compact; } SymbolTable *FstReadSymbols(const string &filename, bool input_symbols) { std::ifstream in(filename, std::ios_base::in | std::ios_base::binary); if (!in) { LOG(ERROR) << "FstReadSymbols: Can't open file " << filename; return nullptr; } FstHeader hdr; if (!hdr.Read(in, filename)) { LOG(ERROR) << "FstReadSymbols: Couldn't read header from " << filename; return nullptr; } if (hdr.GetFlags() & FstHeader::HAS_ISYMBOLS) { std::unique_ptr<SymbolTable> isymbols(SymbolTable::Read(in, filename)); if (isymbols == nullptr) { LOG(ERROR) << "FstReadSymbols: Couldn't read input symbols from " << filename; return nullptr; } if (input_symbols) return isymbols.release(); } if (hdr.GetFlags() & FstHeader::HAS_OSYMBOLS) { std::unique_ptr<SymbolTable> osymbols(SymbolTable::Read(in, filename)); if (osymbols == nullptr) { LOG(ERROR) << "FstReadSymbols: Couldn't read output symbols from " << filename; return nullptr; } if (!input_symbols) return osymbols.release(); } LOG(ERROR) << "FstReadSymbols: The file " << filename << " doesn't contain the requested symbols"; return nullptr; } bool AddAuxiliarySymbols(const string &prefix, int64 start_label, int64 nlabels, SymbolTable *syms) { for (int64 i = 0; i < nlabels; ++i) { auto 
index = i + start_label; if (index != syms->AddSymbol(prefix + std::to_string(i), index)) { FSTERROR() << "AddAuxiliarySymbols: Symbol table clash"; return false; } } return true; } } // namespace fst
0
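`MergeSymbolTable` in `symbol-table-ops.cc` above merges two tables, preferring the left table's key assignments and reporting through the output flag whether labels drawn from the right table would need relabeling. A usage sketch, assuming the OpenFst headers shown elsewhere in this dump and that the caller owns the returned table:

```cpp
#include <iostream>
#include <memory>

#include <fst/symbol-table.h>
#include <fst/symbol-table-ops.h>

int main() {
  fst::SymbolTable left("left"), right("right");
  left.AddSymbol("<eps>", 0);
  left.AddSymbol("a", 1);
  left.AddSymbol("b", 2);
  right.AddSymbol("<eps>", 0);
  right.AddSymbol("b", 1);  // same symbol, different key than in `left`
  right.AddSymbol("c", 2);

  // MergeSymbolTable keeps the left table's assignments and reports whether
  // arcs labelled against the right table would have to be relabeled.
  bool relabel = false;
  std::unique_ptr<fst::SymbolTable> merged(
      fst::MergeSymbolTable(left, right, &relabel));
  std::cout << "merged size: " << merged->NumSymbols()
            << ", right needs relabeling: " << std::boolalpha << relabel
            << "\n";
  return 0;
}
```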
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/text-io.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/text-io.h> #include <cstring> #include <fstream> #include <ostream> #include <sstream> #include <utility> #include <fst/log.h> #include <fstream> #include <fst/util.h> namespace fst { namespace script { // Reads vector of weights; returns true on success. bool ReadPotentials(const string &weight_type, const string &filename, std::vector<WeightClass> *potentials) { std::ifstream istrm(filename); if (!istrm.good()) { LOG(ERROR) << "ReadPotentials: Can't open file: " << filename; return false; } static constexpr int kLineLen = 8096; char line[kLineLen]; size_t nline = 0; potentials->clear(); while (!istrm.getline(line, kLineLen).fail()) { ++nline; std::vector<char *> col; SplitString(line, "\n\t ", &col, true); if (col.empty() || col[0][0] == '\0') continue; if (col.size() != 2) { FSTERROR() << "ReadPotentials: Bad number of columns, " << "file = " << filename << ", line = " << nline; return false; } const ssize_t s = StrToInt64(col[0], filename, nline, false); const WeightClass weight(weight_type, col[1]); while (potentials->size() <= s) { potentials->push_back(WeightClass::Zero(weight_type)); } potentials->back() = weight; } return true; } // Writes vector of weights; returns true on success. bool WritePotentials(const string &filename, const std::vector<WeightClass> &potentials) { std::ofstream ostrm; if (!filename.empty()) { ostrm.open(filename); if (!ostrm.good()) { LOG(ERROR) << "WritePotentials: Can't open file: " << filename; return false; } } std::ostream &strm = ostrm.is_open() ? ostrm : std::cout; strm.precision(9); for (size_t s = 0; s < potentials.size(); ++s) { strm << s << "\t" << potentials[s] << "\n"; } if (strm.fail()) { LOG(ERROR) << "WritePotentials: Write failed: " << (filename.empty() ? "standard output" : filename); return false; } return true; } } // namespace script } // namespace fst
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_13x_8k-linux-amd64-opt.yml
build: template_file: test-linux-opt-base.tyml docker_image: "ubuntu:16.04" dependencies: - "linux-amd64-cpu-opt" - "test-training_8k-linux-amd64-py36m-opt" test_model_task: "test-training_8k-linux-amd64-py36m-opt" system_setup: > ${nodejs.packages_xenial.prep_13} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests.sh 13.x 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 CPU NodeJS 13.x tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v13.x, CPU only, optimized version (8kHz)"
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/randgen.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/script/randgen.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void RandGen(const FstClass &ifst, MutableFstClass *ofst, time_t seed, const RandGenOptions<RandArcSelection> &opts) { if (!internal::ArcTypesMatch(ifst, *ofst, "RandGen")) { ofst->SetProperties(kError, kError); return; } RandGenArgs args(ifst, ofst, seed, opts); Apply<Operation<RandGenArgs>>("RandGen", ifst.ArcType(), &args); } REGISTER_FST_OPERATION(RandGen, StdArc, RandGenArgs); REGISTER_FST_OPERATION(RandGen, LogArc, RandGenArgs); REGISTER_FST_OPERATION(RandGen, Log64Arc, RandGenArgs); } // namespace script } // namespace fst
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/docs-package.sh
#!/bin/bash set -xe source $(dirname "$0")/tc-tests-utils.sh mkdir -p ${TASKCLUSTER_ARTIFACTS} || true cp ${DS_DSDIR}/doc/html.zip ${TASKCLUSTER_ARTIFACTS}/doc-html.zip
0
coqui_public_repos/inference-engine/src
coqui_public_repos/inference-engine/src/ctcdecode/scorer.cpp
#ifdef _MSC_VER #include <stdlib.h> #include <io.h> #include <windows.h> #define R_OK 4 /* Read permission. */ #define W_OK 2 /* Write permission. */ #define F_OK 0 /* Existence. */ #define access _access #else /* _MSC_VER */ #include <unistd.h> #endif #include "scorer.h" #include <iostream> #include <fstream> #include "lm/config.hh" #include "lm/model.hh" #include "lm/state.hh" #include "util/string_piece.hh" #include "decoder_utils.h" static const int32_t MAGIC = 'TRIE'; static const int32_t FILE_VERSION = 6; int Scorer::init(const std::string& lm_path, const Alphabet& alphabet) { set_alphabet(alphabet); return load_lm(lm_path); } int Scorer::init(const std::string& lm_path, const std::string& alphabet_config_path) { int err = alphabet_.init(alphabet_config_path.c_str()); if (err != 0) { return err; } setup_char_map(); return load_lm(lm_path); } void Scorer::set_alphabet(const Alphabet& alphabet) { alphabet_ = alphabet; setup_char_map(); } void Scorer::setup_char_map() { // (Re-)Initialize character map char_map_.clear(); SPACE_ID_ = alphabet_.GetSpaceLabel(); for (int i = 0; i < alphabet_.GetSize(); i++) { // The initial state of FST is state 0, hence the index of chars in // the FST should start from 1 to avoid the conflict with the initial // state, otherwise wrong decoding results would be given. char_map_[alphabet_.DecodeSingle(i)] = i + 1; } } int Scorer::load_lm(const std::string& lm_path) { // Check if file is readable to avoid KenLM throwing an exception const char* filename = lm_path.c_str(); if (access(filename, R_OK) != 0) { return COQUI_ERR_SCORER_UNREADABLE; } // Check if the file format is valid to avoid KenLM throwing an exception lm::ngram::ModelType model_type; if (!lm::ngram::RecognizeBinary(filename, model_type)) { return COQUI_ERR_SCORER_INVALID_LM; } // Load the LM lm::ngram::Config config; config.load_method = util::LoadMethod::LAZY; language_model_.reset(lm::ngram::LoadVirtual(filename, config)); max_order_ = language_model_->Order(); uint64_t package_size; { util::scoped_fd fd(util::OpenReadOrThrow(filename)); package_size = util::SizeFile(fd.get()); } uint64_t trie_offset = language_model_->GetEndOfSearchOffset(); if (package_size <= trie_offset) { // File ends without a trie structure return COQUI_ERR_SCORER_NO_TRIE; } // Read metadata and trie from file std::ifstream fin(lm_path, std::ios::binary); fin.seekg(trie_offset); return load_trie(fin, lm_path); } int Scorer::load_trie(std::ifstream& fin, const std::string& file_path) { int magic; fin.read(reinterpret_cast<char*>(&magic), sizeof(magic)); if (magic != MAGIC) { std::cerr << "Error: Can't parse scorer file, invalid header. Try updating " "your scorer file." << std::endl; return COQUI_ERR_SCORER_INVALID_TRIE; } int version; fin.read(reinterpret_cast<char*>(&version), sizeof(version)); if (version != FILE_VERSION) { std::cerr << "Error: Scorer file version mismatch (" << version << " instead of expected " << FILE_VERSION << "). 
"; if (version < FILE_VERSION) { std::cerr << "Update your scorer file."; } else { std::cerr << "Downgrade your scorer file or update your version of Coqui STT."; } std::cerr << std::endl; return COQUI_ERR_SCORER_VERSION_MISMATCH; } fin.read(reinterpret_cast<char*>(&is_utf8_mode_), sizeof(is_utf8_mode_)); // Read hyperparameters from header double alpha, beta; fin.read(reinterpret_cast<char*>(&alpha), sizeof(alpha)); fin.read(reinterpret_cast<char*>(&beta), sizeof(beta)); reset_params(alpha, beta); fst::FstReadOptions opt; opt.mode = fst::FstReadOptions::MAP; opt.source = file_path; dictionary.reset(FstType::Read(fin, opt)); return COQUI_ERR_OK; } bool Scorer::save_dictionary(const std::string& path, bool append_instead_of_overwrite) { std::ios::openmode om; if (append_instead_of_overwrite) { om = std::ios::in|std::ios::out|std::ios::binary|std::ios::ate; } else { om = std::ios::out|std::ios::binary; } std::fstream fout(path, om); if (!fout ||fout.bad()) { std::cerr << "Error opening '" << path << "'" << std::endl; return false; } fout.write(reinterpret_cast<const char*>(&MAGIC), sizeof(MAGIC)); if (fout.bad()) { std::cerr << "Error writing MAGIC '" << path << "'" << std::endl; return false; } fout.write(reinterpret_cast<const char*>(&FILE_VERSION), sizeof(FILE_VERSION)); if (fout.bad()) { std::cerr << "Error writing FILE_VERSION '" << path << "'" << std::endl; return false; } fout.write(reinterpret_cast<const char*>(&is_utf8_mode_), sizeof(is_utf8_mode_)); if (fout.bad()) { std::cerr << "Error writing is_utf8_mode '" << path << "'" << std::endl; return false; } fout.write(reinterpret_cast<const char*>(&alpha), sizeof(alpha)); if (fout.bad()) { std::cerr << "Error writing alpha '" << path << "'" << std::endl; return false; } fout.write(reinterpret_cast<const char*>(&beta), sizeof(beta)); if (fout.bad()) { std::cerr << "Error writing beta '" << path << "'" << std::endl; return false; } fst::FstWriteOptions opt; opt.align = true; opt.source = path; return dictionary->Write(fout, opt); } bool Scorer::is_scoring_boundary(PathTrie* prefix, size_t new_label) { if (is_utf8_mode()) { if (prefix->character == -1) { return false; } unsigned char first_byte; int distance_to_boundary = prefix->distance_to_codepoint_boundary(&first_byte, alphabet_); int needed_bytes; if ((first_byte >> 3) == 0x1E) { needed_bytes = 4; } else if ((first_byte >> 4) == 0x0E) { needed_bytes = 3; } else if ((first_byte >> 5) == 0x06) { needed_bytes = 2; } else if ((first_byte >> 7) == 0x00) { needed_bytes = 1; } else { assert(false); // invalid byte sequence. 
should be unreachable, disallowed by vocabulary/trie return false; } return distance_to_boundary == needed_bytes; } else { return new_label == SPACE_ID_; } } double Scorer::get_log_cond_prob(const std::vector<std::string>& words, bool bos, bool eos) { return get_log_cond_prob(words.begin(), words.end(), bos, eos); } double Scorer::get_log_cond_prob(const std::vector<std::string>::const_iterator& begin, const std::vector<std::string>::const_iterator& end, bool bos, bool eos) { const auto& vocab = language_model_->BaseVocabulary(); lm::ngram::State state_vec[2]; lm::ngram::State *in_state = &state_vec[0]; lm::ngram::State *out_state = &state_vec[1]; if (bos) { language_model_->BeginSentenceWrite(in_state); } else { language_model_->NullContextWrite(in_state); } double cond_prob = 0.0; for (auto it = begin; it != end; ++it) { lm::WordIndex word_index = vocab.Index(*it); // encounter OOV if (word_index == lm::kUNK) { return OOV_SCORE; } cond_prob = language_model_->BaseScore(in_state, word_index, out_state); std::swap(in_state, out_state); } if (eos) { cond_prob = language_model_->BaseScore(in_state, vocab.EndSentence(), out_state); } // return loge prob return cond_prob/NUM_FLT_LOGE; } void Scorer::reset_params(float alpha, float beta) { this->alpha = alpha; this->beta = beta; } std::vector<std::string> Scorer::split_labels_into_scored_units(const std::vector<unsigned int>& labels) { if (labels.empty()) return {}; std::string s = alphabet_.Decode(labels); std::vector<std::string> words; if (is_utf8_mode_) { words = split_into_codepoints(s); } else { words = split_str(s, " "); } return words; } std::vector<std::string> Scorer::make_ngram(PathTrie* prefix) { std::vector<std::string> ngram; PathTrie* current_node = prefix; PathTrie* new_node = nullptr; for (int order = 0; order < max_order_; order++) { if (!current_node || current_node->character == -1) { break; } std::vector<unsigned int> prefix_vec; if (is_utf8_mode_) { new_node = current_node->get_prev_grapheme(prefix_vec, alphabet_); } else { new_node = current_node->get_prev_word(prefix_vec, alphabet_); } current_node = new_node->parent; // reconstruct word std::string word = alphabet_.Decode(prefix_vec); ngram.push_back(word); } std::reverse(ngram.begin(), ngram.end()); return ngram; } void Scorer::fill_dictionary(const std::unordered_set<std::string>& vocabulary) { // ConstFst is immutable, so we need to use a MutableFst to create the trie, // and then we convert to a ConstFst for the decoder and for storing on disk. fst::StdVectorFst dictionary; // For each unigram convert to ints and put in trie for (const auto& word : vocabulary) { if (word != START_TOKEN && word != UNK_TOKEN && word != END_TOKEN) { add_word_to_dictionary(word, char_map_, is_utf8_mode_, SPACE_ID_ + 1, &dictionary); } } /* Simplify FST * This gets rid of "epsilon" transitions in the FST. * These are transitions that don't require a string input to be taken. * Getting rid of them is necessary to make the FST deterministic, but * can greatly increase the size of the FST */ fst::RmEpsilon(&dictionary); std::unique_ptr<fst::StdVectorFst> new_dict(new fst::StdVectorFst); /* This makes the FST deterministic, meaning for any string input there's * only one possible state the FST could be in. It is assumed our * dictionary is deterministic when using it. * (lest we'd have to check for multiple transitions at each state) */ fst::Determinize(dictionary, new_dict.get()); /* Finds the simplest equivalent fst. 
This is unnecessary but decreases * memory usage of the dictionary */ fst::Minimize(new_dict.get()); // Now we convert the MutableFst to a ConstFst (Scorer::FstType) via its ctor std::unique_ptr<FstType> converted(new FstType(*new_dict)); this->dictionary = std::move(converted); }
0
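`scorer.cpp` above loads a KenLM language model plus an FST trie from a single scorer package and exposes word-sequence scoring to the CTC beam search decoder. The following is only a usage sketch based on the `init` and `get_log_cond_prob` signatures visible in the file; the paths are hypothetical, the default constructor is assumed, and a real scorer package and alphabet file are needed for it to run:

```cpp
#include <iostream>
#include <string>
#include <vector>

#include "scorer.h"  // the Scorer implementation shown above

int main() {
  // Hypothetical file names for illustration only.
  const std::string scorer_path = "kenlm.scorer";
  const std::string alphabet_path = "alphabet.txt";

  Scorer scorer;
  // init() checks readability, loads the KenLM model, then reads the trie
  // appended after the LM inside the same package file.
  int err = scorer.init(scorer_path, alphabet_path);
  if (err != 0) {
    std::cerr << "failed to load scorer, error code " << err << "\n";
    return 1;
  }

  // Conditional log-probability (natural log, per the NUM_FLT_LOGE division
  // in the source) of a word sequence with sentence begin/end markers.
  std::vector<std::string> words = {"hello", "world"};
  double log_prob = scorer.get_log_cond_prob(words, /*bos=*/true, /*eos=*/true);
  std::cout << "log p(hello world) = " << log_prob << "\n";
  return 0;
}
```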
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/linear/fstlinear.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/extensions/linear/linearscript.h> #include <fst/flags.h> DEFINE_string(arc_type, "standard", "Output arc type"); DEFINE_string(epsilon_symbol, "<eps>", "Epsilon symbol"); DEFINE_string(unknown_symbol, "<unk>", "Unknown word symbol"); DEFINE_string(vocab, "", "Path to the vocabulary file"); DEFINE_string(out, "", "Path to the output binary"); DEFINE_string(save_isymbols, "", "Save input symbol table to file"); DEFINE_string(save_fsymbols, "", "Save feature symbol table to file"); DEFINE_string(save_osymbols, "", "Save output symbol table to file"); int main(int argc, char **argv) { // TODO(wuke): more detailed usage std::set_new_handler(FailedNewHandler); SET_FLAGS(argv[0], &argc, &argv, true); fst::script::ValidateDelimiter(); fst::script::ValidateEmptySymbol(); if (argc == 1) { ShowUsage(); return 1; } fst::script::LinearCompile(FLAGS_arc_type, FLAGS_epsilon_symbol, FLAGS_unknown_symbol, FLAGS_vocab, argv + 1, argc - 1, FLAGS_out, FLAGS_save_isymbols, FLAGS_save_fsymbols, FLAGS_save_osymbols); }
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/symbol-table.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Classes to provide symbol-to-integer and integer-to-symbol mappings. #ifndef FST_SYMBOL_TABLE_H_ #define FST_SYMBOL_TABLE_H_ #include <cstring> #include <functional> #include <ios> #include <iostream> #include <memory> #include <sstream> #include <string> #include <utility> #include <vector> #include <fst/compat.h> #include <fst/flags.h> #include <fst/log.h> #include <fstream> #include <map> DECLARE_bool(fst_compat_symbols); namespace fst { constexpr int64 kNoSymbol = -1; // WARNING: Reading via symbol table read options should // not be used. This is a temporary work around for // reading symbol ranges of previously stored symbol sets. struct SymbolTableReadOptions { SymbolTableReadOptions() {} SymbolTableReadOptions( std::vector<std::pair<int64, int64>> string_hash_ranges, const string &source) : string_hash_ranges(std::move(string_hash_ranges)), source(source) {} std::vector<std::pair<int64, int64>> string_hash_ranges; string source; }; struct SymbolTableTextOptions { explicit SymbolTableTextOptions(bool allow_negative_labels = false); bool allow_negative_labels; string fst_field_separator; }; namespace internal { // List of symbols with a dense hash for looking up symbol index. // Hash uses linear probe, rehashes at 0.75% occupancy, avg 6 bytes overhead // per entry. Rehash in place from symbol list. // // Symbols are stored as c strings to avoid adding memory overhead, but the // performance penalty for this is high because rehash must call strlen on // every symbol. AddSymbol can be another 2x faster if symbol lengths were // stored. class DenseSymbolMap { public: DenseSymbolMap(); DenseSymbolMap(const DenseSymbolMap &x); ~DenseSymbolMap(); std::pair<int64, bool> InsertOrFind(const string &key); int64 Find(const string &key) const; const size_t size() const { return symbols_.size(); } const string GetSymbol(size_t idx) const { return string(symbols_[idx], strlen(symbols_[idx])); } void RemoveSymbol(size_t idx); private: // num_buckets must be power of 2. void Rehash(size_t num_buckets); const char* NewSymbol(const string &sym); int64 empty_; std::vector<const char *> symbols_; std::hash<string> str_hash_; std::vector<int64> buckets_; uint64 hash_mask_; }; class SymbolTableImpl { public: explicit SymbolTableImpl(const string &name) : name_(name), available_key_(0), dense_key_limit_(0), check_sum_finalized_(false) {} SymbolTableImpl(const SymbolTableImpl &impl) : name_(impl.name_), available_key_(impl.available_key_), dense_key_limit_(impl.dense_key_limit_), symbols_(impl.symbols_), idx_key_(impl.idx_key_), key_map_(impl.key_map_), check_sum_finalized_(false) {} int64 AddSymbol(const string &symbol, int64 key); int64 AddSymbol(const string &symbol) { return AddSymbol(symbol, available_key_); } // Removes the symbol with the given key. The removal is costly // (O(NumSymbols)) and may reduce the efficiency of Find() because of a // potentially reduced size of the dense key interval. void RemoveSymbol(int64 key); static SymbolTableImpl *ReadText( std::istream &strm, const string &name, const SymbolTableTextOptions &opts = SymbolTableTextOptions()); static SymbolTableImpl* Read(std::istream &strm, const SymbolTableReadOptions &opts); bool Write(std::ostream &strm) const; // Return the string associated with the key. If the key is out of // range (<0, >max), return an empty string. 
string Find(int64 key) const { int64 idx = key; if (key < 0 || key >= dense_key_limit_) { const auto it = key_map_.find(key); if (it == key_map_.end()) return ""; idx = it->second; } if (idx < 0 || idx >= symbols_.size()) return ""; return symbols_.GetSymbol(idx); } // Returns the key associated with the symbol; if the symbol // does not exists, returns kNoSymbol. int64 Find(const string &symbol) const { int64 idx = symbols_.Find(symbol); if (idx == kNoSymbol || idx < dense_key_limit_) return idx; return idx_key_[idx - dense_key_limit_]; } bool Member(int64 key) const { return !Find(key).empty(); } bool Member(const string &symbol) const { return Find(symbol) != kNoSymbol; } int64 GetNthKey(ssize_t pos) const { if (pos < 0 || pos >= symbols_.size()) return kNoSymbol; if (pos < dense_key_limit_) return pos; return Find(symbols_.GetSymbol(pos)); } const string &Name() const { return name_; } void SetName(const string &new_name) { name_ = new_name; } const string &CheckSum() const { MaybeRecomputeCheckSum(); return check_sum_string_; } const string &LabeledCheckSum() const { MaybeRecomputeCheckSum(); return labeled_check_sum_string_; } int64 AvailableKey() const { return available_key_; } size_t NumSymbols() const { return symbols_.size(); } private: // Recomputes the checksums (both of them) if we've had changes since the last // computation (i.e., if check_sum_finalized_ is false). // Takes ~2.5 microseconds (dbg) or ~230 nanoseconds (opt) on a 2.67GHz Xeon // if the checksum is up-to-date (requiring no recomputation). void MaybeRecomputeCheckSum() const; string name_; int64 available_key_; int64 dense_key_limit_; DenseSymbolMap symbols_; // Maps index to key for index >= dense_key_limit: // key = idx_key_[index - dense_key_limit] std::vector<int64> idx_key_; // Maps key to index for key >= dense_key_limit_. // index = key_map_[key] std::map<int64, int64> key_map_; mutable bool check_sum_finalized_; mutable string check_sum_string_; mutable string labeled_check_sum_string_; mutable Mutex check_sum_mutex_; }; } // namespace internal // Symbol (string) to integer (and reverse) mapping. // // The SymbolTable implements the mappings of labels to strings and reverse. // SymbolTables are used to describe the alphabet of the input and output // labels for arcs in a Finite State Transducer. // // SymbolTables are reference-counted and can therefore be shared across // multiple machines. For example a language model grammar G, with a // SymbolTable for the words in the language model can share this symbol // table with the lexical representation L o G. class SymbolTable { public: // Constructs symbol table with an optional name. explicit SymbolTable(const string &name = "<unspecified>") : impl_(std::make_shared<internal::SymbolTableImpl>(name)) {} virtual ~SymbolTable() {} // Reads a text representation of the symbol table from an istream. Pass a // name to give the resulting SymbolTable. static SymbolTable *ReadText( std::istream &strm, const string &name, const SymbolTableTextOptions &opts = SymbolTableTextOptions()) { auto *impl = internal::SymbolTableImpl::ReadText(strm, name, opts); return impl ? new SymbolTable(impl) : nullptr; } // Reads a text representation of the symbol table. 
static SymbolTable *ReadText(const string &filename, const SymbolTableTextOptions &opts = SymbolTableTextOptions()) { std::ifstream strm(filename, std::ios_base::in); if (!strm.good()) { LOG(ERROR) << "SymbolTable::ReadText: Can't open file " << filename; return nullptr; } return ReadText(strm, filename, opts); } // WARNING: Reading via symbol table read options should not be used. This is // a temporary work-around. static SymbolTable* Read(std::istream &strm, const SymbolTableReadOptions &opts) { auto *impl = internal::SymbolTableImpl::Read(strm, opts); return (impl) ? new SymbolTable(impl) : nullptr; } // Reads a binary dump of the symbol table from a stream. static SymbolTable *Read(std::istream &strm, const string &source) { SymbolTableReadOptions opts; opts.source = source; return Read(strm, opts); } // Reads a binary dump of the symbol table. static SymbolTable *Read(const string& filename) { std::ifstream strm(filename, std::ios_base::in | std::ios_base::binary); if (!strm.good()) { LOG(ERROR) << "SymbolTable::Read: Can't open file " << filename; return nullptr; } return Read(strm, filename); } //-------------------------------------------------------- // Derivable Interface (final) //-------------------------------------------------------- // Creates a reference counted copy. virtual SymbolTable *Copy() const { return new SymbolTable(*this); } // Adds a symbol with given key to table. A symbol table also keeps track of // the last available key (highest key value in the symbol table). virtual int64 AddSymbol(const string &symbol, int64 key) { MutateCheck(); return impl_->AddSymbol(symbol, key); } // Adds a symbol to the table. The associated value key is automatically // assigned by the symbol table. virtual int64 AddSymbol(const string &symbol) { MutateCheck(); return impl_->AddSymbol(symbol); } // Adds another symbol table to this table. All key values will be offset // by the current available key (highest key value in the symbol table). // Note string symbols with the same key value will still have the same // key value after the symbol table has been merged, but a different // value. Adding symbol tables do not result in changes in the base table. virtual void AddTable(const SymbolTable &table); virtual void RemoveSymbol(int64 key) { MutateCheck(); return impl_->RemoveSymbol(key); } // Returns the name of the symbol table. virtual const string &Name() const { return impl_->Name(); } // Sets the name of the symbol table. virtual void SetName(const string &new_name) { MutateCheck(); impl_->SetName(new_name); } // Return the label-agnostic MD5 check-sum for this table. All new symbols // added to the table will result in an updated checksum. Deprecated. virtual const string &CheckSum() const { return impl_->CheckSum(); } // Same as CheckSum(), but returns an label-dependent version. virtual const string &LabeledCheckSum() const { return impl_->LabeledCheckSum(); } virtual bool Write(std::ostream &strm) const { return impl_->Write(strm); } bool Write(const string &filename) const { std::ofstream strm(filename, std::ios_base::out | std::ios_base::binary); if (!strm.good()) { LOG(ERROR) << "SymbolTable::Write: Can't open file " << filename; return false; } return Write(strm); } // Dump a text representation of the symbol table via a stream. virtual bool WriteText(std::ostream &strm, const SymbolTableTextOptions &opts = SymbolTableTextOptions()) const; // Dump an text representation of the symbol table. 
bool WriteText(const string &filename) const { std::ofstream strm(filename); if (!strm.good()) { LOG(ERROR) << "SymbolTable::WriteText: Can't open file " << filename; return false; } return WriteText(strm); } // Returns the string associated with the key; if the key is out of // range (<0, >max), returns an empty string. virtual string Find(int64 key) const { return impl_->Find(key); } // Returns the key associated with the symbol; if the symbol does not exist, // kNoSymbol is returned. virtual int64 Find(const string &symbol) const { return impl_->Find(symbol); } // Returns the key associated with the symbol; if the symbol does not exist, // kNoSymbol is returned. virtual int64 Find(const char *symbol) const { return impl_->Find(symbol); } virtual bool Member(int64 key) const { return impl_->Member(key); } virtual bool Member(const string &symbol) const { return impl_->Member(symbol); } // Returns the current available key (i.e., highest key + 1) in the symbol // table. virtual int64 AvailableKey() const { return impl_->AvailableKey(); } // Returns the current number of symbols in table (not necessarily equal to // AvailableKey()). virtual size_t NumSymbols() const { return impl_->NumSymbols(); } virtual int64 GetNthKey(ssize_t pos) const { return impl_->GetNthKey(pos); } private: explicit SymbolTable(internal::SymbolTableImpl *impl) : impl_(impl) {} void MutateCheck() { if (!impl_.unique()) impl_.reset(new internal::SymbolTableImpl(*impl_)); } const internal::SymbolTableImpl *Impl() const { return impl_.get(); } private: std::shared_ptr<internal::SymbolTableImpl> impl_; }; // Iterator class for symbols in a symbol table. class SymbolTableIterator { public: explicit SymbolTableIterator(const SymbolTable &table) : table_(table), pos_(0), nsymbols_(table.NumSymbols()), key_(table.GetNthKey(0)) {} ~SymbolTableIterator() {} // Returns whether iterator is done. bool Done() const { return (pos_ == nsymbols_); } // Return the key of the current symbol. int64 Value() const { return key_; } // Return the string of the current symbol. string Symbol() const { return table_.Find(key_); } // Advances iterator. void Next() { ++pos_; if (pos_ < nsymbols_) key_ = table_.GetNthKey(pos_); } // Resets iterator. void Reset() { pos_ = 0; key_ = table_.GetNthKey(0); } private: const SymbolTable &table_; ssize_t pos_; size_t nsymbols_; int64 key_; }; // Relabels a symbol table as specified by the input vector of pairs // (old label, new label). The new symbol table only retains symbols // for which a relabeling is explicitly specified. // // TODO(allauzen): consider adding options to allow for some form of implicit // identity relabeling. template <class Label> SymbolTable *RelabelSymbolTable(const SymbolTable *table, const std::vector<std::pair<Label, Label>> &pairs) { auto new_table = new SymbolTable(table->Name().empty() ? string() : (string("relabeled_") + table->Name())); for (const auto &pair : pairs) { new_table->AddSymbol(table->Find(pair.first), pair.second); } return new_table; } // Returns true if the two symbol tables have equal checksums. Passing in // nullptr for either table always returns true. bool CompatSymbols(const SymbolTable *syms1, const SymbolTable *syms2, bool warning = true); // Symbol Table serialization. void SymbolTableToString(const SymbolTable *table, string *result); SymbolTable *StringToSymbolTable(const string &str); } // namespace fst #endif // FST_SYMBOL_TABLE_H_
0
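The symbol-table.h header above is largely self-describing, but a short usage sketch may help. The following example was written for this note and is not part of the repository; it assumes an OpenFst installation whose headers and library (-lfst) are available, and the output path words.syms is a placeholder. It exercises only APIs declared in the header: AddSymbol, Find in both directions, SymbolTableIterator, and WriteText.

// sketch.cc -- illustrative only; not part of the repository above.
// Assumes OpenFst headers are on the include path and -lfst is linked.
#include <iostream>
#include <fst/symbol-table.h>

int main() {
  fst::SymbolTable syms("words");
  syms.AddSymbol("<eps>", 0);                  // pin epsilon to key 0 (convention)
  const auto hello = syms.AddSymbol("hello");  // key assigned automatically
  syms.AddSymbol("world");

  // String -> key and key -> string lookups.
  std::cout << "hello -> " << syms.Find("hello") << std::endl;
  std::cout << hello << " -> " << syms.Find(hello) << std::endl;

  // Missing entries report fst::kNoSymbol (string lookup) or "" (key lookup).
  if (syms.Find("missing") == fst::kNoSymbol) {
    std::cout << "'missing' is not in the table" << std::endl;
  }

  // Walk all (key, symbol) pairs.
  for (fst::SymbolTableIterator it(syms); !it.Done(); it.Next()) {
    std::cout << it.Value() << "\t" << it.Symbol() << std::endl;
  }

  // Text serialization; the same format is accepted by SymbolTable::ReadText().
  syms.WriteText("words.syms");  // placeholder filename
  return 0;
}

Reserving key 0 for an epsilon symbol is an FST convention assumed by the sketch, not something the table itself enforces.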
coqui_public_repos/STT
coqui_public_repos/STT/bin/import_magicdata.py
#!/usr/bin/env python
import glob
import os
import tarfile
import wave

import pandas

from coqui_stt_training.util.importers import get_importers_parser

COLUMN_NAMES = ["wav_filename", "wav_filesize", "transcript"]


def extract(archive_path, target_dir):
    print("Extracting {} into {}...".format(archive_path, target_dir))
    with tarfile.open(archive_path) as tar:
        tar.extractall(target_dir)


def is_file_truncated(wav_filename, wav_filesize):
    with wave.open(wav_filename, mode="rb") as fin:
        assert fin.getframerate() == 16000
        assert fin.getsampwidth() == 2
        assert fin.getnchannels() == 1

        header_duration = fin.getnframes() / fin.getframerate()
        filesize_duration = (wav_filesize - 44) / 16000 / 2

    return header_duration != filesize_duration


def preprocess_data(folder_with_archives, target_dir):
    # First extract subset archives
    for subset in ("train", "dev", "test"):
        extract(
            os.path.join(
                folder_with_archives, "magicdata_{}_set.tar.gz".format(subset)
            ),
            target_dir,
        )

    # Folder structure is now:
    # - magicdata_{train,dev,test}.tar.gz
    # - magicdata/
    #   - train/*.wav
    #   - train/TRANS.txt
    #   - dev/*.wav
    #   - dev/TRANS.txt
    #   - test/*.wav
    #   - test/TRANS.txt

    # The TRANS files are tab-separated, with three columns: one containing the
    # WAV file name, one containing the speaker ID, and one containing the
    # transcription
    def load_set(set_path):
        transcripts = pandas.read_csv(
            os.path.join(set_path, "TRANS.txt"), sep="\t", index_col=0
        )
        glob_path = os.path.join(set_path, "*", "*.wav")
        set_files = []
        for wav in glob.glob(glob_path):
            try:
                wav_filename = wav
                wav_filesize = os.path.getsize(wav)
                transcript_key = os.path.basename(wav)
                transcript = transcripts.loc[transcript_key, "Transcription"]

                # Some files in this dataset are truncated, the header duration
                # doesn't match the file size. This causes errors at training
                # time, so check here if things are fine before including a file
                if is_file_truncated(wav_filename, wav_filesize):
                    print(
                        "Warning: File {} is corrupted, header duration does "
                        "not match file size. Ignoring.".format(wav_filename)
                    )
                    continue

                set_files.append((wav_filename, wav_filesize, transcript))
            except KeyError:
                print("Warning: Missing transcript for WAV file {}.".format(wav))
        return set_files

    for subset in ("train", "dev", "test"):
        print("Loading {} set samples...".format(subset))
        subset_files = load_set(os.path.join(target_dir, subset))
        df = pandas.DataFrame(data=subset_files, columns=COLUMN_NAMES)

        # Trim train set to under 10s
        if subset == "train":
            durations = (df["wav_filesize"] - 44) / 16000 / 2
            df = df[durations <= 10.0]
            print("Trimming {} samples > 10 seconds".format((durations > 10.0).sum()))

            with_noise = df["transcript"].str.contains(r"\[(FIL|SPK)\]")
            df = df[~with_noise]
            print(
                "Trimming {} samples with noise ([FIL] or [SPK])".format(
                    sum(with_noise)
                )
            )

        dest_csv = os.path.join(target_dir, "magicdata_{}.csv".format(subset))
        print("Saving {} set into {}...".format(subset, dest_csv))
        df.to_csv(dest_csv, index=False)


def main():
    # https://openslr.org/68/
    parser = get_importers_parser(description="Import MAGICDATA corpus")
    parser.add_argument(
        "folder_with_archives",
        help="Path to folder containing magicdata_{train,dev,test}.tar.gz",
    )
    parser.add_argument(
        "--target_dir",
        default="",
        help="Target folder to extract files into and put the resulting CSVs. Defaults to a folder called magicdata next to the archives",
    )
    params = parser.parse_args()
    if not params.target_dir:
        params.target_dir = os.path.join(params.folder_with_archives, "magicdata")

    preprocess_data(params.folder_with_archives, params.target_dir)


if __name__ == "__main__":
    main()
0
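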
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/script/stateiterator-class.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_SCRIPT_STATEITERATOR_CLASS_H_
#define FST_SCRIPT_STATEITERATOR_CLASS_H_

#include <memory>

#include <fst/fstlib.h>
#include <fst/script/fst-class.h>

// Scripting API support for StateIterator.

namespace fst {
namespace script {

// Virtual interface implemented by each concrete StateIteratorImpl<F>.
class StateIteratorImplBase {
 public:
  virtual bool Done() const = 0;
  virtual int64_t Value() const = 0;
  virtual void Next() = 0;
  virtual void Reset() = 0;
  virtual ~StateIteratorImplBase() {}
};

// Templated implementation.
template <class Arc>
class StateIteratorClassImpl : public StateIteratorImplBase {
 public:
  explicit StateIteratorClassImpl(const Fst<Arc> &fst) : siter_(fst) {}

  bool Done() const final { return siter_.Done(); }

  int64_t Value() const final { return siter_.Value(); }

  void Next() final { siter_.Next(); }

  void Reset() final { siter_.Reset(); }

  ~StateIteratorClassImpl() override {}

 private:
  StateIterator<Fst<Arc>> siter_;
};

class StateIteratorClass;

using InitStateIteratorClassArgs =
    std::pair<const FstClass &, StateIteratorClass *>;

// Untemplated user-facing class holding a templated pimpl.
class StateIteratorClass {
 public:
  explicit StateIteratorClass(const FstClass &fst);

  template <class Arc>
  explicit StateIteratorClass(const Fst<Arc> &fst)
      : impl_(new StateIteratorClassImpl<Arc>(fst)) {}

  bool Done() const { return impl_->Done(); }

  int64_t Value() const { return impl_->Value(); }

  void Next() { impl_->Next(); }

  void Reset() { impl_->Reset(); }

  template <class Arc>
  friend void InitStateIteratorClass(InitStateIteratorClassArgs *args);

 private:
  std::unique_ptr<StateIteratorImplBase> impl_;
};

template <class Arc>
void InitStateIteratorClass(InitStateIteratorClassArgs *args) {
  const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>());
  std::get<1>(*args)->impl_.reset(new StateIteratorClassImpl<Arc>(fst));
}

}  // namespace script
}  // namespace fst

#endif  // FST_SCRIPT_STATEITERATOR_CLASS_H_
0
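As a companion to stateiterator-class.h above, here is a minimal sketch (written for this note, not taken from the source tree) showing how the non-templated script API is typically driven. It assumes OpenFst's script library is built and linked (-lfstscript -lfst) and that fst::script::FstClass::Read from the accompanying fst-class.h loads an FST from disk; the input path in.fst is a placeholder.

// count_states.cc -- illustrative sketch only; "in.fst" is a placeholder path.
#include <iostream>
#include <memory>
#include <fst/script/fst-class.h>
#include <fst/script/stateiterator-class.h>

int main() {
  // FstClass wraps an FST of arbitrary arc type behind a non-templated API.
  std::unique_ptr<fst::script::FstClass> fst(
      fst::script::FstClass::Read("in.fst"));
  if (!fst) return 1;

  // StateIteratorClass dispatches to a StateIterator<Fst<Arc>> internally,
  // so the caller never names the arc type.
  int64_t nstates = 0;
  for (fst::script::StateIteratorClass siter(*fst); !siter.Done();
       siter.Next()) {
    ++nstates;
  }
  std::cout << "states: " << nstates << std::endl;
  return 0;
}

The same pattern (construct from an FstClass, loop on Done/Next, read Value) is how the command-line binaries in this tree consume the iterator.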
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/Makefile.am
SUBDIRS = include lib script bin test extensions
0
coqui_public_repos/STT/native_client/kenlm
coqui_public_repos/STT/native_client/kenlm/util/mmap.hh
#ifndef UTIL_MMAP_H #define UTIL_MMAP_H // Utilities for mmaped files. #include <cstddef> #include <limits> #include <stdint.h> #include <sys/types.h> namespace util { class scoped_fd; std::size_t SizePage(); // (void*)-1 is MAP_FAILED; this is done to avoid including the mmap header here. class scoped_mmap { public: scoped_mmap(bool load_from_memory=false) : data_((void*)-1), size_(0), load_from_memory_(load_from_memory) {} scoped_mmap(void *data, std::size_t size, bool load_from_memory=false) : data_(data), size_(size), load_from_memory_(load_from_memory) {} ~scoped_mmap(); void *get() const { return data_; } const char *begin() const { return reinterpret_cast<char*>(data_); } char *begin() { return reinterpret_cast<char*>(data_); } const char *end() const { return reinterpret_cast<char*>(data_) + size_; } char *end() { return reinterpret_cast<char*>(data_) + size_; } std::size_t size() const { return size_; } bool load_from_memory_; void reset(void *data, std::size_t size) { scoped_mmap other(data_, size_); data_ = data; size_ = size; } void reset() { reset((void*)-1, 0); } void *steal() { void *ret = data_; data_ = (void*)-1; size_ = 0; return ret; } private: void *data_; std::size_t size_; scoped_mmap(const scoped_mmap &); scoped_mmap &operator=(const scoped_mmap &); }; /* For when the memory might come from mmap or malloc. Uses NULL and 0 for * blanks even though mmap signals errors with (void*)-1). */ class scoped_memory { public: typedef enum { // TODO: store rounded up size instead? MMAP_ROUND_1G_ALLOCATED, // The size was rounded up for a 1GB page. Do the same before munmap. MMAP_ROUND_2M_ALLOCATED, // The size was rounded up for a 2MB page. Do the same before munmap. MMAP_ROUND_PAGE_ALLOCATED, // The size was rounded up to a multiple of the default page size. Do the same before munmap. MMAP_ALLOCATED, // munmap MALLOC_ALLOCATED, // free NONE_ALLOCATED // nothing to free (though there can be something here if it's owned by somebody else). } Alloc; scoped_memory(void *data, std::size_t size, Alloc source, bool load_from_memory=false) : data_(data), size_(size), source_(source), load_from_memory_(load_from_memory) {} scoped_memory(bool load_from_memory_=false) : data_(NULL), size_(0), source_(NONE_ALLOCATED), load_from_memory_(load_from_memory_) {} // Calls HugeMalloc scoped_memory(std::size_t to, bool zero_new, bool load_from_memory_=false); #if __cplusplus >= 201103L scoped_memory(scoped_memory &&from) noexcept : data_(from.data_), size_(from.size_), source_(from.source_) { from.steal(); } #endif ~scoped_memory() { reset(); } void *get() const { return data_; } const char *begin() const { return reinterpret_cast<char*>(data_); } char *begin() { return reinterpret_cast<char*>(data_); } const char *end() const { return reinterpret_cast<char*>(data_) + size_; } char *end() { return reinterpret_cast<char*>(data_) + size_; } std::size_t size() const { return size_; } bool load_from_memory_; Alloc source() const { return source_; } void reset() { reset(NULL, 0, NONE_ALLOCATED); } void reset(void *data, std::size_t size, Alloc from); void *steal() { void *ret = data_; data_ = NULL; size_ = 0; source_ = NONE_ALLOCATED; return ret; } private: void *data_; std::size_t size_; Alloc source_; scoped_memory(const scoped_memory &); scoped_memory &operator=(const scoped_memory &); }; extern const int kFileFlags; // Cross-platform, error-checking wrapper for mmap(). 
void *MapOrThrow(std::size_t size, bool for_write, int flags, bool prefault, int fd, uint64_t offset = 0); void *MapOrThrow(std::size_t size, bool for_write, int flags, bool prefault, const char *file_data, uint64_t offset = 0); // msync wrapper void SyncOrThrow(void *start, size_t length); // Cross-platform, error-checking wrapper for munmap(). void UnmapOrThrow(void *start, size_t length); // Allocate memory, promising that all/vast majority of it will be used. Tries // hard to use huge pages on Linux. // If you want zeroed memory, pass zeroed = true. void HugeMalloc(std::size_t size, bool zeroed, scoped_memory &to); // Reallocates memory ala realloc but with option to zero the new memory. // On Linux, the memory can come from anonymous mmap or malloc/calloc. // On non-Linux, only malloc/calloc is supported. // // To summarize, any memory from HugeMalloc or HugeRealloc can be resized with // this. void HugeRealloc(std::size_t size, bool new_zeroed, scoped_memory &mem); enum LoadMethod { // mmap with no prepopulate LAZY, // On linux, pass MAP_POPULATE to mmap. POPULATE_OR_LAZY, // Populate on Linux. malloc and read on non-Linux. POPULATE_OR_READ, // malloc and read. READ, // malloc and read in parallel (recommended for Lustre) PARALLEL_READ, }; void MapRead(LoadMethod method, int fd, uint64_t offset, std::size_t size, scoped_memory &out); void MapRead(LoadMethod method, const char *file_data, uint64_t offset, std::size_t size, scoped_memory &out); // Open file name with mmap of size bytes, all of which are initially zero. void *MapZeroedWrite(int fd, std::size_t size); void *MapZeroedWrite(const char *name, std::size_t size, scoped_fd &file); // Forward rolling memory map with no overlap. class Rolling { public: Rolling() {} explicit Rolling(void *data) { Init(data); } Rolling(const Rolling &copy_from, uint64_t increase = 0); Rolling &operator=(const Rolling &copy_from); // For an actual rolling mmap. explicit Rolling(int fd, bool for_write, std::size_t block, std::size_t read_bound, uint64_t offset, uint64_t amount); // For a static mapping void Init(void *data) { ptr_ = data; current_end_ = std::numeric_limits<uint64_t>::max(); current_begin_ = 0; // Mark as a pass-through. fd_ = -1; } void IncreaseBase(uint64_t by) { file_begin_ += by; ptr_ = static_cast<uint8_t*>(ptr_) + by; if (!IsPassthrough()) current_end_ = 0; } void DecreaseBase(uint64_t by) { file_begin_ -= by; ptr_ = static_cast<uint8_t*>(ptr_) - by; if (!IsPassthrough()) current_end_ = 0; } void *ExtractNonRolling(scoped_memory &out, uint64_t index, std::size_t size); // Returns base pointer void *get() const { return ptr_; } // Returns base pointer. void *CheckedBase(uint64_t index) { if (index >= current_end_ || index < current_begin_) { Roll(index); } return ptr_; } // Returns indexed pointer. void *CheckedIndex(uint64_t index) { return static_cast<uint8_t*>(CheckedBase(index)) + index; } private: void Roll(uint64_t index); // True if this is just a thin wrapper on a pointer. bool IsPassthrough() const { return fd_ == -1; } void *ptr_; uint64_t current_begin_; uint64_t current_end_; scoped_memory mem_; int fd_; uint64_t file_begin_; uint64_t file_end_; bool for_write_; std::size_t block_; std::size_t read_bound_; }; } // namespace util #endif // UTIL_MMAP_H
0
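To make the allocation half of mmap.hh above concrete, the following is a small sketch written for this note rather than taken from kenlm; it assumes the util sources are compiled into the same build and uses only declarations visible in the header: scoped_memory, HugeMalloc, and HugeRealloc. The sizes and strings are arbitrary.

// alloc_sketch.cc -- illustrative only; assumes kenlm's util library is built
// alongside this file (the header lives at util/mmap.hh).
#include <cstring>
#include <iostream>
#include "util/mmap.hh"

int main() {
  util::scoped_memory block;

  // Ask for 1 MiB of zero-initialized memory. On Linux this tries to use huge
  // pages and records how the memory was obtained, so the destructor (or
  // reset()) can release it the matching way (munmap vs. free).
  util::HugeMalloc(1 << 20, /*zeroed=*/true, block);
  std::memcpy(block.get(), "hello", 5);

  // Grow to 2 MiB; the newly added bytes are zeroed because new_zeroed is true.
  util::HugeRealloc(2 << 20, /*new_zeroed=*/true, block);

  std::cout << "size: " << block.size()
            << " first bytes: " << block.begin()[0] << block.begin()[1]
            << std::endl;
  return 0;
}  // block releases its memory here, according to its recorded source().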
coqui_public_repos/STT-models/indonesian/itml
coqui_public_repos/STT-models/indonesian/itml/v0.1.1/LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. 
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
0
coqui_public_repos/STT-models/swahili-congo/twb
coqui_public_repos/STT-models/swahili-congo/twb/v0.3.0/swc-tico-test.csv
wav_filename,wav_filesize,transcript session_87_4574_swc.wav,23891,na uko na moja katika alama hizi zaidi ya maumivu ya kifua session_86_4063_swc.wav,34261,na uko na moja katika alama hizi zaidi ya maumivu ya kifua session_87_4575_swc.wav,16691,na pua yako inatosha makamasi session_86_4064_swc.wav,13813,na pua yako inatosha makamasi session_87_4576_swc.wav,16019,na maumivu inatokea mu kifua yako session_86_4065_swc.wav,61621,na maumivu inatokea mu kifua yako session_87_4577_swc.wav,16211,na ukunywe maji mingi session_86_4066_swc.wav,22933,na ukunywe maji mingi session_87_4578_swc.wav,19859,na iko karibuni na alama za kufanana session_86_4067_swc.wav,26101,na iko karibuni na alama za kufanana session_87_4579_swc.wav,19379,na uniambie ni alama gani uko nazo kwa sasa session_86_4068_swc.wav,25141,na uniambie ni alama gani uko nazo kwa sasa session_87_4580_swc.wav,20819,na alama zako hazipotei mu siku tano session_86_4069_swc.wav,15733,na alama zako hazipotei mu siku tano session_87_4581_swc.wav,23411,maumivu ya sana ngambo ya kushoto ya kifua yako session_86_4070_swc.wav,19381,maumivu ya sana ngambo ya kushoto ya kifua yako session_87_4582_swc.wav,16691,uko na shida ya kupumua kwa sasa session_86_4071_swc.wav,12661,uko na shida ya kupumua kwa sasa session_87_4583_swc.wav,29939,kutowa magonjwa wa kisukari unakuaka na shida ao magonjwa kali zingine session_86_4072_swc.wav,64981,kutowa magonjwa wa kisukari unakuaka na shida ao magonjwa kali zingine session_87_4584_swc.wav,13331,na kama unakohola session_86_4073_swc.wav,9781,na kama unakohola session_87_4585_swc.wav,18419,kwa sababu banakohola session_86_4074_swc.wav,8341,kwa sababu banakohola session_87_4586_swc.wav,25619,unajua kama uko na matatizo ya ugonjwa wa moyo session_86_4075_swc.wav,16021,unajua kama uko na matatizo ya ugonjwa wa moyo session_87_4587_swc.wav,15251,watu wako wa karibu wako na zile ma alama session_86_4076_swc.wav,24853,watu wako wa karibu wako na zile ma alama session_87_4588_swc.wav,40019,uko na kikohozi ya kukauka mafua pua yenye kutoa makamasi kutapika kuhara session_86_4077_swc.wav,35221,uko na kikohozi ya kukauka mafua pua yenye kutoa makamasi kutapika kuhara session_87_4589_swc.wav,18611,sizani kama tension yangu inapanda session_86_4078_swc.wav,14773,sizani kama tension yangu inapanda session_87_4590_swc.wav,25811,minasikia maumivu ndani ya kifua hapa mbele ya kifua session_86_4079_swc.wav,22933,minasikia maumivu ndani ya kifua hapa mbele ya kifua session_87_4591_swc.wav,21491,nilisikia maumivu kwa muda mfupi ndani ya kifua session_86_4080_swc.wav,20533,nilisikia maumivu kwa muda mfupi ndani ya kifua session_87_4592_swc.wav,16211,lakini minakuaka na mafua pia session_86_4081_swc.wav,14581,lakini minakuaka na mafua pia session_87_4593_swc.wav,20339,nilifanya ku mwili pembeni ya kifua session_86_4082_swc.wav,15541,nilifanya ku mwili pembeni ya kifua session_87_4594_swc.wav,30899,naona kama inaanzia katikati ya kifua yako na inapanda mpaka ku shingo yako session_86_4083_swc.wav,33781,naona kama inaanzia katikati ya kifua yako na inapanda mpaka ku shingo yako session_87_4595_swc.wav,14771,inaluma mu kifua session_86_4084_swc.wav,9493,inaluma mu kifua session_87_4596_swc.wav,18131,inapitikana katikati ya kifua yangu session_86_4085_swc.wav,26581,inapitikana katikati ya kifua yangu session_87_4597_swc.wav,13331,ni katikati ya kifua yangu session_86_4086_swc.wav,13141,ni katikati ya kifua yangu session_87_4598_swc.wav,23219,iko sawa vile mafua ya kawaida ao kama homa session_86_4087_swc.wav,21781,iko sawa vile mafua ya 
kawaida ao kama homa session_87_4599_swc.wav,12851,nitakutumia picha session_86_4088_swc.wav,19093,nitakutumia picha session_87_4600_swc.wav,15731,nitakutumia picha ku ecran yako session_86_4089_swc.wav,15061,nitakutumia picha ku ecran yako session_87_4601_swc.wav,21779,mara mingi ni aina ya iyi maumivu mu kifua session_86_4090_swc.wav,19093,mara mingi ni aina ya iyi maumivu mu kifua session_87_4602_swc.wav,15539,dada yangu iko na ma alama za vile vile session_86_4091_swc.wav,14581,dada yangu iko na ma alama za vile vile session_87_4603_swc.wav,13331,minakutumia picha sasa hivi session_86_4092_swc.wav,26293,minakutumia picha sasa hivi session_87_4604_swc.wav,14099,ao kama uko na ugonjwa wa moyo session_86_4093_swc.wav,12853,ao kama uko na ugonjwa wa moyo session_87_4605_swc.wav,20531,manaake maumivu mu kifua yako ilianza iyi asubui session_86_4094_swc.wav,43861,manaake maumivu mu kifua yako ilianza iyi asubui session_87_4606_swc.wav,12851,homa ilianza tangu siku mbili session_86_4095_swc.wav,12373,homa ilianza tangu siku mbili session_87_4607_swc.wav,13811,maumivu ya mwili na mafua session_86_4096_swc.wav,9781,maumivu ya mwili na mafua session_87_4608_swc.wav,19379,sawa kulingana na miye ni mafua ndogo session_86_4097_swc.wav,17941,sawa kulingana na miye ni mafua ndogo session_87_4609_swc.wav,29459,sawa mama kutokana na alama zako inawezekana uko na virusi ya mafua session_86_4098_swc.wav,37333,sawa mama kutokana na alama zako inawezekana uko na virusi ya mafua session_87_4610_swc.wav,20339,inawezekana uko na maumivu mu kifua session_86_4099_swc.wav,14581,inawezekana uko na maumivu mu kifua session_87_4611_swc.wav,36179,chama cha taifa ya basket ilishimamisha michezo kwa sababu ya ugonjwa ya virusi ya corona session_86_4100_swc.wav,36373,chama cha taifa ya basket ilishimamisha michezo kwa sababu ya ugonjwa ya virusi ya corona session_87_4612_swc.wav,56819,siku ya tatu chama cha taifa ya basket nba cha marekani ilishimamisha michezo ya basket juu ya wasiwasi kutokana na ugonjwa wa corona session_86_4101_swc.wav,76021,siku ya tatu chama cha taifa ya basket nba cha marekani ilishimamisha michezo ya basket juu ya wasiwasi kutokana na ugonjwa wa corona session_87_4613_swc.wav,37139,uamuzi ya nba ilifuatana na kesi ya mchezaji wa utah jazz mwenye alikutwa na corona kisha kupimwa session_86_4102_swc.wav,39541,uamuzi ya nba ilifuatana na kesi ya mchezaji wa utah jazz mwenye alikutwa na corona kisha kupimwa session_87_4614_swc.wav,27731,nba haikutambulisha jina ya ule mchezaji katika tangazo yake session_86_4103_swc.wav,26101,nba haikutambulisha jina ya ule mchezaji katika tangazo yake session_87_4615_swc.wav,55379,lakini walisema kama hakukuwa ku chesapeake energy arena ya muji wa oklahoma kwenye utah jazz ingepashwa cheza na oklahoma city thunder session_86_4104_swc.wav,57013,lakini walisema kama hakukuwa ku chesapeake energy arena ya muji wa oklahoma kwenye utah jazz ingepashwa cheza na oklahoma city thunder session_87_4616_swc.wav,73619,kutokana na tangazo ya vyombo vya habari nba inashimamisha michezo kutokana na kalendari ya michezo ya siku ya tatu mpaka siku maamuzi mupya itatolewa session_86_4105_swc.wav,64021,kutokana na tangazo ya vyombo vya habari nba inashimamisha michezo kutokana na kalendari ya michezo ya siku ya tatu mpaka siku maamuzi mupya itatolewa session_87_4617_swc.wav,69299,match ya jazz ilishimamishwa kama vile tu match ingine ya nba yenye ingeli chezwa siku ya tatu kati ya sacramento kings na wa new orleans pelicans session_87_4618_swc.wav,37139,zingine ma matchs zenye 
zilipangwa siku ya tatu zote zilichezwa kutokana na habari ya nbc news session_86_4107_swc.wav,71221,zingine ma matchs zenye zilipangwa siku ya tatu zote zilichezwa kutokana na habari ya nbc news session_87_4619_swc.wav,69011,ingine shirika kubwa ya basketball ya marekani national collegiate athletic association inayo simamia basketball kwenye vyuo vikuu ilishimamisha mashindano yake march madness kwa ligi ya wanaume na wanamuke session_87_4622_swc.wav,35411,raisi wa marekani trump anatangaza virusi ya corona kuwa ugonjwa inayo husu inchi muzima session_86_4111_swc.wav,34261,raisi wa marekani trump anatangaza virusi ya corona kuwa ugonjwa inayo husu inchi muzima session_87_4623_swc.wav,57011,siku ya tano raisi wa marekani donald trump alitangaza zarura mu taifa nzima kwa kupiganisha muripuko ya ugonjwa wa sasa wa virusi ya corona session_86_4112_swc.wav,55573,siku ya tano raisi wa marekani donald trump alitangaza zarura mu taifa nzima kwa kupiganisha muripuko ya ugonjwa wa sasa wa virusi ya corona session_87_4624_swc.wav,66419,hiyo uamuzi ilitangazwa siku mbili baada ya shirika la afya duniani oms kutangaza muripuko hiyo ya magonjwa yenye kusababisha corona kuwa ugonjwa mu inchi za mingi session_86_4113_swc.wav,70741,hiyo uamuzi ilitangazwa siku mbili baada ya shirika la afya duniani oms kutangaza muripuko hiyo ya magonjwa yenye kusababisha corona kuwa ugonjwa mu inchi za mingi session_87_4625_swc.wav,69299,uamuzi inapatia mamulaka serkali ya shirikisho kupata karibu dola bilioni makumi tano ya marekani za ziada juu ya kupiganisha hiyo muripuko ya magonjwa iliripoti nbc news session_87_4626_swc.wav,79091,tangazo ilisema wazi kama muripuko ya magonjwa ya corona huko marekani ni zarura ya kitaifa na ikatangaza kama zarura ya kitaifa ilianza tangu tarehe moja ya mwezi ya tatu karibu juma mbili mbele ya iyi tangazo session_87_4630_swc.wav,43379,trump alikamata uamuzi huo kutokana na masharti ya sheria ya zarura ya kitaifa tukitaja vyombo vingine vinavyo ongoza session_86_4119_swc.wav,65653,trump alikamata uamuzi huo kutokana na masharti ya sheria ya zarura ya kitaifa tukitaja vyombo vingine vinavyo ongoza session_87_4641_swc.wav,55859,mfano wa kihesabu inaonesha kama maambukizi ya muripuko ya magonjwa inaweza kucheleweshwa kupitia kufungwa kwa masomo session_86_4130_swc.wav,57781,mfano wa kihesabu inaonesha kama maambukizi ya muripuko ya magonjwa inaweza kucheleweshwa kupitia kufungwa kwa masomo session_87_4642_swc.wav,40499,lakini matokeo inafatana na mawasiliano yenye watoto wanaendelea nayo inje ya masomo session_86_4131_swc.wav,39541,lakini matokeo inafatana na mawasiliano yenye watoto wanaendelea nayo inje ya masomo session_87_4643_swc.wav,42131,kufungwa kwa masomo inaweza kuwa na matokeo mazuri wakati inachukuliwa mbio mbio session_86_4132_swc.wav,31573,kufungwa kwa masomo inaweza kuwa na matokeo mazuri wakati inachukuliwa mbio mbio session_87_4644_swc.wav,54899,kama kufungwa kwa masomo inachelewa kulingana na muripuko ya magonjwa uwezo wake itakuwa ndogo na matokeo yake inawezekana isikuwe session_86_4133_swc.wav,61141,kama kufungwa kwa masomo inachelewa kulingana na muripuko ya magonjwa uwezo wake itakuwa ndogo na matokeo yake inawezekana isikuwe session_87_4645_swc.wav,64211,zaidi ya hayo katika muda ambapo kufunguliwa kwa masomo kisha kipindi yenye zilikuwa zimefungwa ilisababisha maambukizi kuongezeka session_86_4134_swc.wav,58261,zaidi ya hayo katika muda ambapo kufunguliwa kwa masomo kisha kipindi yenye zilikuwa zimefungwa ilisababisha maambukizi kuongezeka 
session_87_4647_swc.wav,43379,miji za kwanza zenye zilitumia hizi hatua zilipitisha wakati murefu mbele ya kufikisha uwingi wa vifo session_86_4136_swc.wav,42613,miji za kwanza zenye zilitumia hizi hatua zilipitisha wakati murefu mbele ya kufikisha uwingi wa vifo session_87_4659_swc.wav,74579,maambukizi makubwa ikitokea kwa wakaaji wa eneo zaidi ya hatua ya watu kutosogeleana kuongeza siku za mapumuziko ya wanafunzi inaweza pia hitajika session_86_4148_swc.wav,72853,maambukizi makubwa ikitokea kwa wakaaji wa eneo zaidi ya hatua ya watu kutosogeleana kuongeza siku za mapumuziko ya wanafunzi inaweza pia hitajika session_87_4661_swc.wav,35699,vyuo vikuu na masomo zote mu inchi zilifungwa session_86_4150_swc.wav,19573,vyuo vikuu na masomo zote mu inchi zilifungwa session_87_4665_swc.wav,75731,na kwa iyo unesco iliomba inchi kusaidia wanafunzi waliyo guswa na jamaa zabo na kuweka mipango ya mafunzo kwa mbali na yenye itagusa eneo mingi session_86_4154_swc.wav,69973,na kwa iyo unesco iliomba inchi kusaidia wanafunzi waliyo guswa na jamaa zabo na kuweka mipango ya mafunzo kwa mbali na yenye itagusa eneo mingi session_87_4676_swc.wav,57491,serkali ililazimisha mafungo ya juma mbili kwa kusaidia wanafunzi kujitayarisha kuanza kusoma kwa mbali kwa muda mufupi session_86_4165_swc.wav,50581,serkali ililazimisha mafungo ya juma mbili kwa kusaidia wanafunzi kujitayarisha kuanza kusoma kwa mbali kwa muda mufupi session_87_4677_swc.wav,59699,vyuo vikuu zilifunga kwa muda wa juma moja lakini zikaanza tena toa mafunzo kupitia mtandao wa internet session_86_4166_swc.wav,51733,vyuo vikuu zilifunga kwa muda wa juma moja lakini zikaanza tena toa mafunzo kupitia mtandao wa internet session_87_4678_swc.wav,48179,huduma zingine za masomo ziliendelea zinafanyikwa lakini mafunzo ilikuwa inafanyika tu kwa mbali session_86_4167_swc.wav,39733,huduma zingine za masomo ziliendelea zinafanyikwa lakini mafunzo ilikuwa inafanyika tu kwa mbali session_87_4680_swc.wav,64211,maeneo kwenye masomo zinabakia nafunguliwa ni taiwan singapour australia swede na maeneo tafauti za marekani session_86_4169_swc.wav,49813,maeneo kwenye masomo zinabakia nafunguliwa ni taiwan singapour australia swede na maeneo tafauti za marekani session_87_4682_swc.wav,33299,wengine waikuwa nasumbuka sana juu ya kufungwa mu maeneo tafauti session_86_4171_swc.wav,31381,wengine waikuwa nasumbuka sana juu ya kufungwa mu maeneo tafauti session_87_4687_swc.wav,40979,hesabu ya wanafunzi inatokana na habari mupya za institut ya statisque ya unesco session_86_4176_swc.wav,28021,hesabu ya wanafunzi inatokana na habari mupya za institut ya statisque ya unesco session_87_4688_swc.wav,79379,kufunga masomo juu ya kupiganisha muripuko ya magonjwa ya corona ilikuwa na vinyume kuhusu kupata elimu na shida za kiuchumi na kijamii kwa kiasi kikubwa session_86_4177_swc.wav,80053,kufunga masomo juu ya kupiganisha muripuko ya magonjwa ya corona ilikuwa na vinyume kuhusu kupata elimu na shida za kiuchumi na kijamii kwa kiasi kikubwa session_87_4693_swc.wav,60179,kufungiwa mu eneo mbalimbali inagandamiza masomo juu wazazi na viongozi wanaongoza watoto kwenye masomo zenye zinafungula session_86_4182_swc.wav,53941,kufungiwa mu eneo mbalimbali inagandamiza masomo juu wazazi na viongozi wanaongoza watoto kwenye masomo zenye zinafungula session_87_4694_swc.wav,72179,wanamuke ni wengi kati ya wafanyakazi wa kiafya na hawawezi enda ku kazi mara mingi juu ya majukumu zao za uzazi zinazo tokana na masomo kufungwa session_86_4183_swc.wav,75253,wanamuke ni wengi kati ya wafanyakazi wa 
kiafya na hawawezi enda ku kazi mara mingi juu ya majukumu zao za uzazi zinazo tokana na masomo kufungwa session_87_4695_swc.wav,62771,ile inaonesha ya kama wafanyakazi wengi wa kiafya hawapatikani ndani ya masomo kwenye wako na lazima nabo zaidi wakati ya shida ya kiafya session_87_4697_swc.wav,76211,ukosefu ya kutumia teknolojia ao connexion internet ya muzuri inazuwia maendeleo ya mafundisho hasa kwa wanafunzi wanao tokea kwenye jamaa zisizo jiweza session_86_4186_swc.wav,62773,ukosefu ya kutumia teknolojia ao connexion internet ya muzuri inazuwia maendeleo ya mafundisho hasa kwa wanafunzi wanao tokea kwenye jamaa zisizo jiweza session_87_4700_swc.wav,56531,kwa wanafunzi wasiyokuwa na mtandao wa internet nyumbani kwao hio inaongeza magumu ya kupata mafunzo kwa mbali session_86_4189_swc.wav,40981,kwa wanafunzi wasiyokuwa na mtandao wa internet nyumbani kwao hio inaongeza magumu ya kupata mafunzo kwa mbali session_87_4701_swc.wav,75731,kufungwa kwa masomo inalazimisha wazazi na wasimamizi wa watoto kuchunga na kuwaongoza katika mafunzo ya mbali wakati watoto hawasomi session_86_4190_swc.wav,63733,kufungwa kwa masomo inalazimisha wazazi na wasimamizi wa watoto kuchunga na kuwaongoza katika mafunzo ya mbali wakati watoto hawasomi session_87_4703_swc.wav,47219,watoto wengi mu dunia wanategemea chakula za bure ao za bei chini zenye zinapewa ku masomo session_86_4192_swc.wav,37813,watoto wengi mu dunia wanategemea chakula za bure ao za bei chini zenye zinapewa ku masomo session_87_4707_swc.wav,47219,kufungwa kwa masomo iko na vinyume ya mubaya ku matokeo ya mafunzo ya wanafunzi session_86_4196_swc.wav,32341,kufungwa kwa masomo iko na vinyume ya mubaya ku matokeo ya mafunzo ya wanafunzi session_86_4197_swc.wav,75733,masomo inasaidiya kupata elimu ya musingi ya maana na wakati masomo zinafungwa watoto wa adolescents na vijana wazima wanakosa fursa ya maendeleo na mafanyikio session_87_4710_swc.wav,58739,iyi ni kweli hasa wakati kufungwa inaendelea kwa muda murefu masomo ni fasi ya shuruli za kijamii na mawasiliano kati ya watu session_86_4199_swc.wav,59221,iyi ni kweli hasa wakati kufungwa inaendelea kwa muda murefu masomo ni fasi ya shuruli za kijamii na mawasiliano kati ya watu session_87_4712_swc.wav,38579,iyi ni kweli hasa zaidi kwa wazazi wenye kuwa na elimu na kipato kidogo session_86_4201_swc.wav,35413,iyi ni kweli hasa zaidi kwa wazazi wenye kuwa na elimu na kipato kidogo session_87_4713_swc.wav,69011,vinyume yenye inaweza tokea juu ya kufungwa kwa masomo na kutumikisha mafunzo ya mbali haziandaliwi katika sheria za serkali ya sasa session_86_4202_swc.wav,58933,vinyume yenye inaweza tokea juu ya kufungwa kwa masomo na kutumikisha mafunzo ya mbali haziandaliwi katika sheria za serkali ya sasa session_87_4714_swc.wav,72179,elimu inayo paniwa ku masomo tafauti na isiyo paniwa ku masomo inahusu masomo masomo ya elimu ya juu vyuo vikuu na masomo za mafunzo session_86_4203_swc.wav,68341,elimu inayo paniwa ku masomo tafauti na isiyo paniwa ku masomo inahusu masomo masomo ya elimu ya juu vyuo vikuu na masomo za mafunzo session_87_4718_swc.wav,80339,hata kama shule mingi za musingi na sekondari mu dunia zilifungwa kwa sababu ya ugonjwa ya corona hatua zinazo husu mipango za elimu ya mbele ya mafunzo ya musingi zilibadilika session_86_4207_swc.wav,71701,hata kama shule mingi za musingi na sekondari mu dunia zilifungwa kwa sababu ya ugonjwa ya corona hatua zinazo husu mipango za elimu ya mbele ya mafunzo ya musingi zilibadilika session_87_4720_swc.wav,80051,jimbo zimoja za mafunzo zinaweza kutowa 
suluhisho tafauti za uchungaji ya watoto kwa kuanza na watoto wa wafanyakazi wenye kuwa mustari ya kwanza na watunzaji session_87_4724_swc.wav,72659,mwanzo ya mwezi wa tatu watu wakubwa watano wanao tumika katika kituo cha matunzo ya watoto wa shule ya msingi pa kobe walikutwa na ugongwa ya virusi ya corona session_86_4213_swc.wav,67381,mwanzo ya mwezi wa tatu watu wakubwa watano wanao tumika katika kituo cha matunzo ya watoto wa shule ya msingi pa kobe walikutwa na ugongwa ya virusi ya corona session_87_4725_swc.wav,49139,kisha kupima zaidi ya watoto mia moja ndani ya shule mwanafunzi umoja wa masomo ya maternelle alikutwa na virusi session_86_4214_swc.wav,51253,kisha kupima zaidi ya watoto mia moja ndani ya shule mwanafunzi umoja wa masomo ya maternelle alikutwa na virusi session_87_4726_swc.wav,55091,elimu ya shule ya musingi ao ya chini inalingana na miaka ine mpaka saba ya miaka ya kwanza ya elimu inayo paniwa ku masomo session_86_4215_swc.wav,55093,elimu ya shule ya musingi ao ya chini inalingana na miaka ine mpaka saba ya miaka ya kwanza ya elimu inayo paniwa ku masomo session_87_4729_swc.wav,62291,elimu ya ngazi ya tatu pia inaitwa elimu ya juu inahusu viwango vya elimu vya bila mukazo kisha mwisho ya elimu ya sekondari ao lycee session_86_4218_swc.wav,68533,elimu ya ngazi ya tatu pia inaitwa elimu ya juu inahusu viwango vya elimu vya bila mukazo kisha mwisho ya elimu ya sekondari ao lycee session_87_4730_swc.wav,54131,mara mingi elimu ya ngazi ya tatu iko na elimu ya msingi hadi elimu ya chuo kikuu na mafundisho na mafunzo ya kiufundi session_86_4219_swc.wav,65173,mara mingi elimu ya ngazi ya tatu iko na elimu ya msingi hadi elimu ya chuo kikuu na mafundisho na mafunzo ya kiufundi session_87_4731_swc.wav,46451,watu wenye wanamaliza elimu ya ngazi ya yulu wanapataka ma certificats diplomes ao cheti za chuo kikuu session_86_4220_swc.wav,44053,watu wenye wanamaliza elimu ya ngazi ya yulu wanapataka ma certificats diplomes ao cheti za chuo kikuu session_87_4732_swc.wav,71219,elimu ya ngazi ya kwanza ya chuo kikuu inahusu elimu yenye inatolewa kisha masomo ya sekondari na ya mbele ya masomo ya chuo kikuu ambayo kwa ujumla mwanafunzi anapataka licence session_87_4734_swc.wav,61811,masomo ya elimu ya juu na vyuo vikuu inchini marekani ziliombwa kurudishia kila mwanafunzi franga ya masomo ya kupanga nyumba na ya mahitaji ya kawaida session_86_4223_swc.wav,69973,masomo ya elimu ya juu na vyuo vikuu inchini marekani ziliombwa kurudishia kila mwanafunzi franga ya masomo ya kupanga nyumba na ya mahitaji ya kawaida session_87_4735_swc.wav,59411,huko marekani masomo ya elimu ya juu na vyuo vikuu zinatumika kama miji kidogo zenye zinapana franga mingi ku miji majimbo na maeneo session_86_4224_swc.wav,72181,huko marekani masomo ya elimu ya juu na vyuo vikuu zinatumika kama miji kidogo zenye zinapana franga mingi ku miji majimbo na maeneo session_87_4743_swc.wav,71219,iyi inaweza kuwa njia ya kujifunza mambo ya teknolojia mafunzo kupitia video mooc ao tena kupitia vipindi vya redio na television session_86_4232_swc.wav,64021,iyi inaweza kuwa njia ya kujifunza mambo ya teknolojia mafunzo kupitia video mooc ao tena kupitia vipindi vya redio na television session_87_4745_swc.wav,74579,kuangalia kama inawezekana kuhamisha vifaa vya aina iyi kwa muda mfupi kutoka mu ma salles informatique kwenda mu ma familia na kuwapatia fursa ya kutumikisha internet session_86_4234_swc.wav,74101,kuangalia kama inawezekana kuhamisha vifaa vya aina iyi kwa muda mfupi kutoka mu ma salles informatique kwenda mu ma 
familia na kuwapatia fursa ya kutumikisha internet session_87_4747_swc.wav,39059,kuhakikisha kama kutumikisha programes na mitandao haitatia siri ya habari kuhusu wanafunzi katika hatari session_86_4236_swc.wav,47701,kuhakikisha kama kutumikisha programes na mitandao haitatia siri ya habari kuhusu wanafunzi katika hatari session_87_4748_swc.wav,73331,kuchagua suluhisho zenye zinajibu ku matatizo ya kiakili mbele ya elimu kutafuta vifaa kwa ajili ya kukusanyisha ma masomo wazazi walimu na wanafunzi session_86_4237_swc.wav,76213,kuchagua suluhisho zenye zinajibu ku matatizo ya kiakili mbele ya elimu kutafuta vifaa kwa ajili ya kukusanyisha ma masomo wazazi walimu na wanafunzi session_87_4751_swc.wav,61811,kupanga gisi ya kutumia wakati kwa kuzingatia hali ya eneo iliyo hasirika kiwango ya elimu mahitaji ya wanafunzi na kupatikana kwa wazazi session_86_4240_swc.wav,71701,kupanga gisi ya kutumia wakati kwa kuzingatia hali ya eneo iliyo hasirika kiwango ya elimu mahitaji ya wanafunzi na kupatikana kwa wazazi session_87_4752_swc.wav,44531,kuchagula namna ya kufundisha inayo stahili kufuatana na hali ya kufungwa kwa masomo na kujifungia ku nyumba session_86_4241_swc.wav,64021,kuchagula namna ya kufundisha inayo stahili kufuatana na hali ya kufungwa kwa masomo na kujifungia ku nyumba session_87_4753_swc.wav,31091,kuepuka namna ya kufundisha ambayo inaomba mawasiliano ya ukaribu session_86_4242_swc.wav,37621,kuepuka namna ya kufundisha ambayo inaomba mawasiliano ya ukaribu session_87_4755_swc.wav,58259,kusaidia walimu kutayarisha mambo ya msingi ya vifaa kwa mfano suluhisho za kutumia habari ku internet kama wanaombwa kupana elimu en direct session_87_4757_swc.wav,45971,kuepuka kupatia wanafunzi na wazazi kazi mingi kwa kuwaomba wa telecharger na wapime programes mingi ku mutandao session_86_4246_swc.wav,60853,kuepuka kupatia wanafunzi na wazazi kazi mingi kwa kuwaomba wa telecharger na wapime programes mingi ku mutandao session_87_4758_swc.wav,71411,kuanzisha sheria za kusoma kwa mbali na kufuata mpango wa kufundisha wanafunzi kueleza kanuni za mafunzo kwa mbali pamoja na wazazi na wanafunzi session_87_4759_swc.wav,57971,kutayarisha maulizo uchunguzi ao mazowezi za mafunzo juu ya kufuatilia kwa ukaribu ginsi mafunzo ya wanafunzi inaendelea session_86_4248_swc.wav,71701,kutayarisha maulizo uchunguzi ao mazowezi za mafunzo juu ya kufuatilia kwa ukaribu ginsi mafunzo ya wanafunzi inaendelea session_87_4760_swc.wav,73811,kujaribu kutumikisha vifaa juu wanafunzi waweze kupana maoni yabo na kuepuka kuwapatia wazazi kazi mingi kwa kuwaomba kutumikisha kompyuta na kutuma maoni ya wanafunzi session_87_4766_swc.wav,45971,ugonjwa huo ni aina ya virusi ya corona familia ya sars mers na aina zingine za mafua ya kawaida session_86_4255_swc.wav,66421,ugonjwa huo ni aina ya virusi ya corona familia ya sars mers na aina zingine za mafua ya kawaida session_87_4769_swc.wav,35891,wazee na wale wenye kuwa na ukingo ya chini njo wako kwenye hatari kubwa pia na kifo session_86_4258_swc.wav,61141,wazee na wale wenye kuwa na ukingo ya chini njo wako kwenye hatari kubwa pia na kifo session_87_4771_swc.wav,23891,ikiwezekana epuka ma eneo yenye kuwa na batu mingi session_86_4260_swc.wav,43381,ikiwezekana epuka ma eneo yenye kuwa na batu mingi session_87_4772_swc.wav,40979,iyi haiko tu juu ya kukukinga lakini pia kukinga batu benye unawasiliana nabo session_86_4261_swc.wav,63733,iyi haiko tu juu ya kukukinga lakini pia kukinga batu benye unawasiliana nabo session_87_4773_swc.wav,69299,inaonekana kama haukosi kitu ya maana 
ukibakia ku nyumba juu maeneo mingi ya kujifurahishia mu dunia yote zinafunga milango kwa sasa juu ya kuzuia iyi magonjwa isisambarane session_87_4775_swc.wav,48851,kabla ya kuenda soma kanuni mingi za kusafiri zenye kubadilika mara kwa mara ambazo zinalazimishwa mu dunia kote session_86_4264_swc.wav,63733,kabla ya kuenda soma kanuni mingi za kusafiri zenye kubadilika mara kwa mara ambazo zinalazimishwa mu dunia kote session_87_4776_swc.wav,52211,kisha kusafiri chunguza afya yako na upange kujitenga kwa juma mbili juu ya kuepusha kusambaraza ugonjwa kwa watu wengine session_86_4265_swc.wav,69493,kisha kusafiri chunguza afya yako na upange kujitenga kwa juma mbili juu ya kuepusha kusambaraza ugonjwa kwa watu wengine session_87_4778_swc.wav,25139,mipaka mingi za kimataifa na za kando kando zinafungwa session_86_4267_swc.wav,36373,mipaka mingi za kimataifa na za kando kando zinafungwa session_87_4779_swc.wav,74771,kama hauko kwako nyumbani na hasa kama uko inje ya inchi fikiria kurudia haraka iwezekanavyo juu inaweza kuwa nguvu zaidi wakati mashurti inaendelea kubadilika session_87_4780_swc.wav,51059,piko wakati iyi inakuwa haiwezekani hata kama uko na afya bora na unatimiza masharti ya sheria ya kuingia mu inchi session_87_4781_swc.wav,49811,kama haupati namna ya kurudia mu inchi yako wasiliana na ambassade ao consulat ya inchi yako yenye kuwa karibu juu upate musaada session_86_4270_swc.wav,79573,kama haupati namna ya kurudia mu inchi yako wasiliana na ambassade ao consulat ya inchi yako yenye kuwa karibu juu upate musaada session_87_4782_swc.wav,47411,wanaweza panga safari kujua ndege zenye hazisafiri kila siku ao kukopesha franga kwa haraka juu ya kulipia safari session_86_4271_swc.wav,75253,wanaweza panga safari kujua ndege zenye hazisafiri kila siku ao kukopesha franga kwa haraka juu ya kulipia safari session_87_4783_swc.wav,43571,kama haiko vile ujitayarishe kama utalazimika kubakia mahali kwenye uko kwa muda mrefu session_86_4272_swc.wav,56053,kama haiko vile ujitayarishe kama utalazimika kubakia mahali kwenye uko kwa muda mrefu session_87_4784_swc.wav,78611,ukipata namna ya kurudia ku nyumba ujitayarishe kujua kama itakuwa na garama sana na hali yenye haiko ya kawaida na itakua na mashurti yenye haiko ya kawaida kama hakuna mizigo yenye iliandikwa session_87_4786_swc.wav,61811,hata kama iyi magonjwa ilianza uko china ilisambarana dunia muzima na china iko na bagonjwa benye kuhakikishwa na vifo kidogo kuliko inchi zingine mingi session_87_4787_swc.wav,20051,ulaya njo eneo mupya ya ugonjwa session_86_4276_swc.wav,33973,ulaya njo eneo mupya ya ugonjwa session_87_4789_swc.wav,64691,maambukizo ndani ya eneo ilitokea mu inchi mingi za ma eneo yote kubwa ya dunia isipokuwa mataifa mbalimbali yenye kupatikana ndani ya kisanga mu habari ya pacifique session_87_4790_swc.wav,23699,pata habari mupya kutoka oms session_86_4279_swc.wav,27541,pata habari mupya kutoka oms session_87_4791_swc.wav,47891,kiwango ya kweli ya virusi kusambarana haijulikani kwa kutokana na hesabu kidogo ya vipimo yenye ilifanyika session_86_4280_swc.wav,65173,kiwango ya kweli ya virusi kusambarana haijulikani kwa kutokana na hesabu kidogo ya vipimo yenye ilifanyika session_87_4794_swc.wav,37811,virusi inaambukia kupitia matoni yenye inatoka wakati ya kupumua na mu bitu session_86_4283_swc.wav,64213,virusi inaambukia kupitia matoni yenye inatoka wakati ya kupumua na mu bitu session_87_4795_swc.wav,62579,virusi ya corona ni virusi kubwa iyi inamaanisha kama siyo haiyambukie kabisa kupitia mu hewa iko mu matoni ya pumuzi 
session_87_4796_swc.wav,64691,kwa kuheshimia umbali wa mita mbili migulu sita na wengine kwa ujumla inawezekana kuzuia maambukizi kupitia iyi njia session_86_4285_swc.wav,77173,kwa kuheshimia umbali wa mita mbili migulu sita na wengine kwa ujumla inawezekana kuzuia maambukizi kupitia iyi njia session_87_4797_swc.wav,80051,maambukizi kupitia bitu inawezekana kwa mfano kama mtu anakohola na kugusa mkono ya mulango kisha wengine wanagusa mkono poignet ya mulango na kisha sura zao session_87_4799_swc.wav,24659,maambukizi kupitia kinywa na mavi nayo inawezekana session_86_4288_swc.wav,52693,maambukizi kupitia kinywa na mavi nayo inawezekana session_87_4801_swc.wav,32531,alama za kawaida ni kama vile homa kikohozi ya kukauka na muchoko session_86_4290_swc.wav,69781,alama za kawaida ni kama vile homa kikohozi ya kukauka na muchoko session_87_4802_swc.wav,64691,shida ya kupumua maumivu ndani ya shingo maumivu ya kichwa maumivu ya mwili ao kukohola bya makamasi nazo ni moja ya alama zenye haziko za kawaida session_87_4803_swc.wav,36371,wagonjwa wengine wanakuwaka na alama za kidogo sana zenye kufanana za homa session_86_4292_swc.wav,46933,wagonjwa wengine wanakuwaka na alama za kidogo sana zenye kufanana za homa session_87_4804_swc.wav,76019,shida kali ni pamoja na magonjwa ya mafafa ugonjwa wa shida kali ya kupumua na upungufu ya uwezo ya viungo mingi ya mwili yenye inasababisha ulemavu ao kifo session_87_4809_swc.wav,66419,watoto kidogo waliripotiwa kugonjwa na wengi kati yabo wako na ma alama kidogo na za kawaida hata kama wengi kati yabo wanapatikana na ugonjwa ya mafafa session_87_4810_swc.wav,52499,kwa kweli wafanyakazi wa kiafya wako mu hatari kubwa kuliko wengine na vikundi vya magonjwa kati ya wafanyakazi na katika vituo vya afya session_86_4299_swc.wav,71413,kwa kweli wafanyakazi wa kiafya wako mu hatari kubwa kuliko wengine na vikundi vya magonjwa kati ya wafanyakazi na katika vituo vya afya session_87_4813_swc.wav,55379,maambukizi kupitia watu wasiyo onesha alama ilijulikana lakini kiwango ya iyi hali bado ingali kwenye uchunguzi session_86_4302_swc.wav,58741,maambukizi kupitia watu wasiyo onesha alama ilijulikana lakini kiwango ya iyi hali bado ingali kwenye uchunguzi session_87_4814_swc.wav,38291,uwezo wa kuambukizwa tena na virusi kisha kuipona kwa mara ya kwanza haiyahakikishwa session_86_4303_swc.wav,46261,uwezo wa kuambukizwa tena na virusi kisha kuipona kwa mara ya kwanza haiyahakikishwa session_87_4815_swc.wav,69491,vinyume vya muda mrefu kwa watu waliopona bado haiya julikani lakini upungufu wa uwezo ya kupumua ilioneshwa kwa wagonjwa mbalimbali waliopona session_87_4816_swc.wav,70739,kipimo ya virusi inafanyika kwa kusindiria ka chombo ka plastique mu pua ao mu shingo na kupata echantillon yenye itapimwa mu laboratoire chumba ya kipimo juu ya kugundua virusi session_87_4817_swc.wav,46451,kipimo ya musingi ya sasa inafanywa kwa kukamata echantillon na kuichunguza ili kutafuta ma alama za kijenetiki zenye za virusi session_87_4818_swc.wav,48371,hakuna kipimo inayo tambulika kwa kupima virusi ao kinga ya mwili katika damu atakama vipimo vinaendelea kufanywa session_86_4307_swc.wav,72181,hakuna kipimo inayo tambulika kwa kupima virusi ao kinga ya mwili katika damu atakama vipimo vinaendelea kufanywa session_87_4819_swc.wav,28499,hakuna kipimo ya kuhakikisha gisi ya kutoambukizwa virusi session_86_4308_swc.wav,47893,hakuna kipimo ya kuhakikisha gisi ya kutoambukizwa virusi session_87_4820_swc.wav,50771,serkali mingi mu dunia nzima zilishauri raia wao kutosafiri bila sababu ya maana 
wakati wa muripuko ya ugonjwa session_86_4309_swc.wav,67861,serkali mingi mu dunia nzima zilishauri raia wao kutosafiri bila sababu ya maana wakati wa muripuko ya ugonjwa session_87_4821_swc.wav,68339,kampuni mingi za ndege na zenye kutayarisha safari za tourisme zinakurahisishia kwa kutowa malipo ya kutosafiri tena na ya kubadilisha siku ya kusaifiri session_87_4822_swc.wav,23411,hasa epuka kusafiri na mashua za starehe session_86_4311_swc.wav,28501,hasa epuka kusafiri na mashua za starehe session_87_4824_swc.wav,49139,mu ma eneo yaliyo gusiwa watafiti wengi wanashauria tabia inayo itwa umbali wa kimwili ao kutosogeleana session_86_4313_swc.wav,69301,mu ma eneo yaliyo gusiwa watafiti wengi wanashauria tabia inayo itwa umbali wa kimwili ao kutosogeleana session_87_4825_swc.wav,58259,iyi inamaanisha kupunguza mawasiliano na wengine umbali ya metres mbili ao migulu sita nabo na kuepuka mikusanyiko session_86_4314_swc.wav,75733,iyi inamaanisha kupunguza mawasiliano na wengine umbali ya metres mbili ao migulu sita nabo na kuepuka mikusanyiko session_87_4826_swc.wav,41459,kuko maeneo zenye zinaruhusu makundi kidogo zingine zinakataza mikusanyiko yoyote ile session_86_4315_swc.wav,58261,kuko maeneo zenye zinaruhusu makundi kidogo zingine zinakataza mikusanyiko yoyote ile session_87_4827_swc.wav,67859,hatua zenye unashauriwa kukamata ni kutumikia kunyumba ikiwezekana kuepuka makundi ya watu na kutoka kunyumba isipokuwa ni lazima kabisa session_87_4829_swc.wav,21299,mu maeneo mingi hizi hatua zinalazimishwa session_86_4318_swc.wav,29653,mu maeneo mingi hizi hatua zinalazimishwa session_87_4830_swc.wav,27059,heshimia kanuni za usafi kama zile za kuzuia mafua session_86_4319_swc.wav,35413,heshimia kanuni za usafi kama zile za kuzuia mafua session_87_4831_swc.wav,45971,nawa mikono yako mara kwa mara na maji ya moto na sabuni kisha uikaushe kwa kutumia kitambala ya kutakata session_86_4320_swc.wav,66901,nawa mikono yako mara kwa mara na maji ya moto na sabuni kisha uikaushe kwa kutumia kitambala ya kutakata session_87_4832_swc.wav,78419,kwa kuwa virusi ya corona ni virusi inayo funikwa kunawa mikono na sabuni inauwa virusi kwa kuaribisha iyo mufuniko ya virusi yenye inakuwaka ya mafuta mafuta session_87_4834_swc.wav,66419,kukausha mikono iliyo safishwa inasaidia kuondoa mikrobe ao vidudu ku ngozi kwa hivyo usiruke iyi hatua na usichangie kitambala ya kupanguza session_87_4837_swc.wav,29939,epuka kugusa macho yako pua na mudomo na mikono yako session_86_4326_swc.wav,28981,epuka kugusa macho yako pua na mudomo na mikono yako session_87_4838_swc.wav,28019,watu wengi wanajigusa sura kisha kila dakika chache kwa siku session_86_4327_swc.wav,26581,watu wengi wanajigusa sura kisha kila dakika chache kwa siku session_87_4839_swc.wav,30899,jaribu kuifanya mara kidogo na nawa mikono mbele ya kugusa sura wako session_86_4328_swc.wav,36853,jaribu kuifanya mara kidogo na nawa mikono mbele ya kugusa sura wako session_87_4840_swc.wav,37331,jaribu pia kuepuka kugusa fasi zenye hauhitaji kugusa hasa kwa mikono wazi session_86_4329_swc.wav,47701,jaribu pia kuepuka kugusa fasi zenye hauhitaji kugusa hasa kwa mikono wazi session_87_4841_swc.wav,58931,kohola na upige chafya ndani ya mukonjo ya konongo ao kitambala mouchoir kisha utupe hiyo kitambala na unawe mikono yako mara moja session_86_4330_swc.wav,77653,kohola na upige chafya ndani ya mukonjo ya konongo ao kitambala mouchoir kisha utupe hiyo kitambala na unawe mikono yako mara moja session_87_4842_swc.wav,35699,usisimame ao kuikala karibu na watu ambao wanaweza kuwa 
wagonjwa session_86_4331_swc.wav,40021,usisimame ao kuikala karibu na watu ambao wanaweza kuwa wagonjwa session_87_4843_swc.wav,36659,bakia kwa umbali wa karibu metre moja na bora zaidi metre mbili migulu sita session_86_4332_swc.wav,47893,bakia kwa umbali wa karibu metre moja na bora zaidi metre mbili migulu sita session_87_4844_swc.wav,51731,kwa kupima iyi umbali kwa haraka fikiria kuwa weye na mtu mwingine munanyorosheana mikono mumoja kwa mwengine session_86_4333_swc.wav,68053,kwa kupima iyi umbali kwa haraka fikiria kuwa weye na mtu mwingine munanyorosheana mikono mumoja kwa mwengine session_87_4845_swc.wav,33299,unaweza kugusa mukono ya mtu mwengine bila kupiga hatua ya kumujogelea session_86_4334_swc.wav,39253,unaweza kugusa mukono ya mtu mwengine bila kupiga hatua ya kumujogelea session_87_4846_swc.wav,23699,kama jibu ni ndio uko karibu sana session_86_4335_swc.wav,23893,kama jibu ni ndio uko karibu sana session_87_4847_swc.wav,74579,safisha vitu na fasi zenye watu wengi wanagusa kama vile mikono poignees ya mulango telefone na telecommande ya television kwa kutumia kitu ya kupanguza ya kawaida session_87_4848_swc.wav,41459,safisha ma fasi kwa kutumia dawa inayofaa kama vile eau de javel yenye kuchangwa na maji session_86_4337_swc.wav,44533,safisha ma fasi kwa kutumia dawa inayofaa kama vile eau de javel yenye kuchangwa na maji session_87_4849_swc.wav,36851,bakia nyumbani kama unagonjwa na epuka kugusana na wengine mpaka alama zako zipotee session_86_4338_swc.wav,37621,bakia nyumbani kama unagonjwa na epuka kugusana na wengine mpaka alama zako zipotee session_87_4850_swc.wav,62099,usichangie vifaa binafsi ambavyo vinagusa mate kama vile brosse a dents mifuniko vinywaji chupa za maji na serviettes vitambala session_86_4339_swc.wav,72661,usichangie vifaa binafsi ambavyo vinagusa mate kama vile brosse a dents mifuniko vinywaji chupa za maji na serviettes vitambala session_87_4851_swc.wav,46739,kutumikisha baguettes tu miti ku sahani moja hasa inchini china inapashwa epukwa session_86_4340_swc.wav,60853,kutumikisha baguettes tu miti ku sahani moja hasa inchini china inapashwa epukwa session_87_4852_swc.wav,19571,salimia watu bila kuwagusa session_86_4341_swc.wav,18901,salimia watu bila kuwagusa session_87_4853_swc.wav,47219,epuka kukumbatiana kupana bizu kukamatana mikono ngumi kwa ngumi na mawasiliano yoyote ile session_86_4342_swc.wav,54901,epuka kukumbatiana kupana bizu kukamatana mikono ngumi kwa ngumi na mawasiliano yoyote ile session_87_4854_swc.wav,32339,kama haiwezekani kuepuka mawasiliano nawa mikono yako mbele na kisha session_86_4343_swc.wav,40213,kama haiwezekani kuepuka mawasiliano nawa mikono yako mbele na kisha session_87_4855_swc.wav,63251,hiyo haitakukinga na virusi ya corona lakini kwa ngambo moja itakulinda itakukinga na mafua ambayo bado ni hatari kubwa kuliko corona mu maeneo mingi session_86_4344_swc.wav,71413,hiyo haitakukinga na virusi ya corona lakini kwa ngambo moja itakulinda itakukinga na mafua ambayo bado ni hatari kubwa kuliko corona mu maeneo mingi session_87_4856_swc.wav,46931,itakusaidia pia usikuwe na wasiwasi kama unapata mafua na kuzani kama inaweza kuwa labda virusi ya corona session_86_4345_swc.wav,65173,itakusaidia pia usikuwe na wasiwasi kama unapata mafua na kuzani kama inaweza kuwa labda virusi ya corona session_87_4857_swc.wav,59699,kama uko mzee ao unapatikana kwenye kundi ingine ya hatari kubwa na unaweza kupata chanjo ya kujikinga na ugonjwa ya mafafa fanya hivyo session_86_4346_swc.wav,66901,kama uko mzee ao unapatikana kwenye kundi ingine 
ya hatari kubwa na unaweza kupata chanjo ya kujikinga na ugonjwa ya mafafa fanya hivyo session_87_4859_swc.wav,47411,kuvala mask inashauriwa kwa wale wote wanao shakiwa kuwa na ugonjwa na wale wenye kuwa karibu na watu waliyo ambukizwa session_86_4348_swc.wav,58261,kuvala mask inashauriwa kwa wale wote wanao shakiwa kuwa na ugonjwa na wale wenye kuwa karibu na watu waliyo ambukizwa session_87_4860_swc.wav,42419,kwa watu wasiyo kuwa na alama oms haishauri kuvala mask hata kama inchi zingine zinafanya hivyo session_86_4349_swc.wav,49621,kwa watu wasiyo kuwa na alama oms haishauri kuvala mask hata kama inchi zingine zinafanya hivyo session_87_4861_swc.wav,48851,kuvala aina fulani ya mask inalazimishwa mu nchi na miji mbalimbali juu ya kupunguza maambukizi katika jamii session_86_4350_swc.wav,68533,kuvala aina fulani ya mask inalazimishwa mu nchi na miji mbalimbali juu ya kupunguza maambukizi katika jamii session_87_4862_swc.wav,20051,kuko ukosefu wa mask za kinganga mu dunia session_86_4351_swc.wav,28693,kuko ukosefu wa mask za kinganga mu dunia session_87_4863_swc.wav,21299,kama unavala mask hakikisha kama unaitumia muzuri session_86_4352_swc.wav,39253,kama unavala mask hakikisha kama unaitumia muzuri session_87_4864_swc.wav,36659,mask inapashwa kufunika pua yako na mudomo na kuenea muzuri bila kuacha fasi ya wazi session_86_4353_swc.wav,47413,mask inapashwa kufunika pua yako na mudomo na kuenea muzuri bila kuacha fasi ya wazi session_87_4865_swc.wav,29171,nawa mikono yako mbele ya kuvala mask na epuka kuigusa wakati unaivala session_86_4354_swc.wav,50293,nawa mikono yako mbele ya kuvala mask na epuka kuigusa wakati unaivala session_87_4866_swc.wav,21011,ukiugusa nawa mikono yako mara moja session_86_4355_swc.wav,35893,ukiugusa nawa mikono yako mara moja session_87_4867_swc.wav,26291,wakati mask inalobana itupe mbali na uibadilishe session_86_4356_swc.wav,42613,wakati mask inalobana itupe mbali na uibadilishe session_87_4868_swc.wav,29459,uitoshe kwa nyuma uitupe kisha nawa mikono yako session_86_4357_swc.wav,38101,uitoshe kwa nyuma uitupe kisha nawa mikono yako session_87_4869_swc.wav,38099,kumbuka kwamba mask hazikamati fasi ya usafi bora endelea kunawa mikono mara kwa mara session_86_4358_swc.wav,49141,kumbuka kwamba mask hazikamati fasi ya usafi bora endelea kunawa mikono mara kwa mara session_87_4870_swc.wav,17939,usiuze mask kama hauzihitaji session_86_4359_swc.wav,24181,usiuze mask kama hauzihitaji session_87_4871_swc.wav,53459,kwa sababu ya ukosefu wa mask wafanyakazi kiafya wako na shida ya kupata mask na iyi inatia kila mtu katika hatari session_86_4360_swc.wav,58741,kwa sababu ya ukosefu wa mask wafanyakazi kiafya wako na shida ya kupata mask na iyi inatia kila mtu katika hatari session_87_4873_swc.wav,16979,muviachie wafanyakazi wa kiafya session_86_4362_swc.wav,27253,muviachie wafanyakazi wa kiafya session_87_4874_swc.wav,76691,epuka maeneo yenye kutembelewa sana hasa maeneo iliyo fungwa na zenye kuwa na hewa kidogo kama vile mikutano tamasha maduka kubwa usafirishaji ya watu wengi na mikutano ya kidini session_87_4875_swc.wav,65939,matukio inayo husisha mukusanyiko kubwa ya watu matembezi wa kidini mpaka matamasha zilikatazwa duniani kote juu ya kuzuwia virusi kusambarana session_87_4876_swc.wav,47699,fasi zinazo tembelewa na wa touristes wageni kampuni na usafirishaji zinaweza fungwa hasa katika inchi zilizo guswa session_86_4365_swc.wav,67861,fasi zinazo tembelewa na wa touristes wageni kampuni na usafirishaji zinaweza fungwa hasa katika inchi zilizo guswa 
session_87_4877_swc.wav,73331,kuko matukio yenye ilikatazwa hasa ma spectacles tamasha mipango ya michezo na mafundisho za masomo zinapanwa ku mitandao iyi inamaanisha kama unaweza kuifata bila kusafiri session_87_4878_swc.wav,64499,mu ma stations za kuuzishia essence tumia gants vifaa ya kukinga mikono ao upanguze mikono na kitambala yenye iko amo dawa kama inawezekana session_87_4879_swc.wav,36851,kisha kuuza essence nawa mikono yako na dawa ya maji ya kusafisha mikono desinfectant session_86_4368_swc.wav,47413,kisha kuuza essence nawa mikono yako na dawa ya maji ya kusafisha mikono desinfectant session_87_4880_swc.wav,32531,serkali ya amerika na canada zinashauri kuepuka kusafiri na mashua bateau ya starehe session_86_4369_swc.wav,52693,serkali ya amerika na canada zinashauri kuepuka kusafiri na mashua bateau ya starehe session_87_4881_swc.wav,30419,maambukizi inasambarana kwa urahisi mu mashua na huduma za afya ziko kidogo session_86_4370_swc.wav,49333,maambukizi inasambarana kwa urahisi mu mashua na huduma za afya ziko kidogo session_87_4882_swc.wav,51539,wakati ya muripuko ya magonjwa kwenye mashua ya starehe kuwafungia watu na kutia nanga inakuwaka shida kwa sababu watu ni wengi ndani ya mashua session_87_4884_swc.wav,61811,kama unawaza ulisha ambukizwa pigia hospitali simu ao huduma za afya haraka mbele ya kwenda huko wepeke juu ya kuepuka kuambukiza wengine session_86_4373_swc.wav,77173,kama unawaza ulisha ambukizwa pigia hospitali simu ao huduma za afya haraka mbele ya kwenda huko wepeke juu ya kuepuka kuambukiza wengine session_87_4885_swc.wav,23411,onesha alama zako na eneo zenye ulisafiria session_86_4374_swc.wav,27253,onesha alama zako na eneo zenye ulisafiria session_87_4886_swc.wav,30419,vala mask ya kiganga na ufate mashauri ya waongozi na waganga session_86_4375_swc.wav,32533,vala mask ya kiganga na ufate mashauri ya waongozi na waganga session_87_4887_swc.wav,23411,angalia pia safari ya ndege na afya session_86_4376_swc.wav,23893,angalia pia safari ya ndege na afya session_87_4889_swc.wav,52499,watafiti wamegundua kwamba wasafiri wenye wanaikala ku biti ya pembeni ya dirisha wako mu hatari kidogo ya kuwasiliana na watu wagonjwa session_86_4378_swc.wav,78613,watafiti wamegundua kwamba wasafiri wenye wanaikala ku biti ya pembeni ya dirisha wako mu hatari kidogo ya kuwasiliana na watu wagonjwa session_87_4890_swc.wav,38099,jaribu ku lipiya kiti ya pembeni ya dirisha na epuka kuzunguruka mu ndege wakati ya safari session_86_4379_swc.wav,42133,jaribu ku lipiya kiti ya pembeni ya dirisha na epuka kuzunguruka mu ndege wakati ya safari session_87_4891_swc.wav,44819,kisha kunawa mikono na mbele ya kuikala tumikisha kitambala yenye iko amo dawa kwa kupanguza buchafu pembeni ya kiti yako session_86_4380_swc.wav,59701,kisha kunawa mikono na mbele ya kuikala tumikisha kitambala yenye iko amo dawa kwa kupanguza buchafu pembeni ya kiti yako session_87_4892_swc.wav,33971,panguza fasi za ngufu na kama kiti yako ni ya ngozi uipanguze pia session_86_4381_swc.wav,44053,panguza fasi za ngufu na kama kiti yako ni ya ngozi uipanguze pia session_87_4893_swc.wav,34739,usipanguze kiti yakufunikwa na kitambala juu baridi inaweza sababisha maambukizi session_86_4382_swc.wav,51061,usipanguze kiti yakufunikwa na kitambala juu baridi inaweza sababisha maambukizi session_87_4894_swc.wav,29939,wakati ya kutumia kitambala ya kutoa buchafu fuata maelekezo yenye iko ku emballage session_86_4383_swc.wav,46453,wakati ya kutumia kitambala ya kutoa buchafu fuata maelekezo yenye iko ku emballage 
session_87_4895_swc.wav,56531,na usisahabu virusi inaambukia kwa njia ya kinywa pua na macho unaweza kusukula iyo eneo lakini haitakamata fasi usafi bora session_87_4896_swc.wav,27059,nawa mikono yako na epuka kugusa sura yako session_86_4385_swc.wav,22741,nawa mikono yako na epuka kugusa sura yako session_87_4897_swc.wav,26771,tumia pia kitambala mouchoir kwa kugusa ecran ao boutons zingine session_86_4386_swc.wav,34261,tumia pia kitambala mouchoir kwa kugusa ecran ao boutons zingine session_87_4898_swc.wav,40499,mu toilette wc tumikisha kipanguziyo ya kartasi kwa kufunga bomba na kufungua mulango kisha uitupe session_86_4387_swc.wav,65173,mu toilette wc tumikisha kipanguziyo ya kartasi kwa kufunga bomba na kufungua mulango kisha uitupe session_87_4899_swc.wav,48179,kampuni za ndege mu eneo zilizo guswa zinakamata kwa sasa hatua za kupunguza maambukizi na kuhakikisha usalama ya wasafiri session_86_4388_swc.wav,68053,kampuni za ndege mu eneo zilizo guswa zinakamata kwa sasa hatua za kupunguza maambukizi na kuhakikisha usalama ya wasafiri session_87_4902_swc.wav,27731,inaonekana kama hauna ruhusa ya kubadilisha kiti wakati wa safari ya ndege session_86_4391_swc.wav,42613,inaonekana kama hauna ruhusa ya kubadilisha kiti wakati wa safari ya ndege session_87_4903_swc.wav,69011,kama musafiri kwenye ndege anaambukizwa iyi hatua inasaidia viongozi kupata watu wenye waliikala karibu naye juu ya kuwapima ao kuwatenga mbali na wengine session_86_4392_swc.wav,74773,kama musafiri kwenye ndege anaambukizwa iyi hatua inasaidia viongozi kupata watu wenye waliikala karibu naye juu ya kuwapima ao kuwatenga mbali na wengine session_87_4904_swc.wav,35891,matunzo ya virusi ya corona ni ya kupunguza alama na kuzuwia shida za kujitokeza session_86_4393_swc.wav,59413,matunzo ya virusi ya corona ni ya kupunguza alama na kuzuwia shida za kujitokeza session_87_4905_swc.wav,28979,hakuna chanjo ao matunzo kamili ya virusi ya corona ya mupya session_86_4394_swc.wav,37141,hakuna chanjo ao matunzo kamili ya virusi ya corona ya mupya session_87_4906_swc.wav,26771,utafiti kuhusu chanjo ao matunzo ya kupiganisha virusi inaendelea session_86_4395_swc.wav,38101,utafiti kuhusu chanjo ao matunzo ya kupiganisha virusi inaendelea session_87_4907_swc.wav,59219,uchunguzi mingi inafanyika kwa njia ya ma dawa za kupiganisha ukimwi na dawa zingine za mupya zinazo punguza makali ya virusi kwa kutunza virusi ya corona session_87_4908_swc.wav,25331,alama za kidogo zinaweza kupona kwa kutumia paracetamol session_86_4397_swc.wav,34741,alama za kidogo zinaweza kupona kwa kutumia paracetamol session_87_4909_swc.wav,54899,kama unaambukizwa na virusi ya corona inchi zitakutenga mpaka vipimo mingi vya kufatana vifanyike na kuhakikisha kama hauna virusi ya corona session_86_4398_swc.wav,76213,kama unaambukizwa na virusi ya corona inchi zitakutenga mpaka vipimo mingi vya kufatana vifanyike na kuhakikisha kama hauna virusi ya corona session_87_4911_swc.wav,21299,inchi zingine zitakupima pia hata kama hauna alama yoyote session_86_4400_swc.wav,34933,inchi zingine zitakupima pia hata kama hauna alama yoyote session_87_4913_swc.wav,62579,inchi mingi zimesimamisha ao zilipunguza safari za ndege za mashua na kuvuka mipaka hasa zaidi tena kwenda na kutoka mu maeneo zilizo guswa na maambukizi session_86_4402_swc.wav,80053,inchi mingi zimesimamisha ao zilipunguza safari za ndege za mashua na kuvuka mipaka hasa zaidi tena kwenda na kutoka mu maeneo zilizo guswa na maambukizi session_87_4915_swc.wav,56051,hata kama kutengwa mbali na wengine hailazimishwi 
unaweza ombwa kujitenga mwenyewe kwa kubakia nyumbani na kuepuka kuwasiliana na wengine session_86_4404_swc.wav,64501,hata kama kutengwa mbali na wengine hailazimishwi unaweza ombwa kujitenga mwenyewe kwa kubakia nyumbani na kuepuka kuwasiliana na wengine session_87_4916_swc.wav,63731,hatua kali zaidi zinahusu wasafiri ambao walitembelea mu maeneo yaliyo ambukizwa lakini inazidi inchi mingi zinazitumia kwa wasafiri wote wanao ingia session_86_4405_swc.wav,79861,hatua kali zaidi zinahusu wasafiri ambao walitembelea mu maeneo yaliyo ambukizwa lakini inazidi inchi mingi zinazitumia kwa wasafiri wote wanao ingia session_87_4917_swc.wav,68051,kuko inchi zenye zinakatala watu wote ao karibu wageni wote kuingia ao zinapunguza hesabu ya raia na wakaaji wa kawaida wa ile eneo kusafiri session_86_4406_swc.wav,72853,kuko inchi zenye zinakatala watu wote ao karibu wageni wote kuingia ao zinapunguza hesabu ya raia na wakaaji wa kawaida wa ile eneo kusafiri session_87_4918_swc.wav,17651,ni maeneo gani njo zinaguswa session_86_4407_swc.wav,21493,ni maeneo gani njo zinaguswa session_87_4919_swc.wav,50099,iyi ni hali inayo badilika haraka na kila nchi jimbo shirika iko na liste ya maeneo yenye kuwa chini ya masharti session_86_4408_swc.wav,80341,iyi ni hali inayo badilika haraka na kila nchi jimbo shirika iko na liste ya maeneo yenye kuwa chini ya masharti session_87_4920_swc.wav,35411,muji ya wuhan iko kwa hizo listes na kipande ingine ya china ku za mingi session_86_4409_swc.wav,42421,muji ya wuhan iko kwa hizo listes na kipande ingine ya china ku za mingi session_87_4922_swc.wav,33779,inchi zingine zinaweka hatua kwa raia na wakaaji wa maeneo yenye inaambukizwa sana session_86_4411_swc.wav,58261,inchi zingine zinaweka hatua kwa raia na wakaaji wa maeneo yenye inaambukizwa sana session_87_4923_swc.wav,33779,kartasi ya mafasirio zaidi kuhusu hatua za kuingia inatolewa kwa siku na iata session_86_4412_swc.wav,60853,kartasi ya mafasirio zaidi kuhusu hatua za kuingia inatolewa kwa siku na iata session_87_4924_swc.wav,21971,haiongelei hatua zote lakini ni ya maana session_86_4413_swc.wav,39733,haiongelei hatua zote lakini ni ya maana session_87_4925_swc.wav,32051,endelea na pata habari ugonjwa na kanuni za kusafiri zinabadilika haraka session_86_4414_swc.wav,48853,endelea na pata habari ugonjwa na kanuni za kusafiri zinabadilika haraka session_87_4926_swc.wav,67379,kuhusu safari mu siku za mbele fikiria kufanya reservation yenye wanaweza kurudishia franga kama mabadiliko ya hali itakulazimisha ubadilishe mipango yako session_87_4927_swc.wav,49331,epuka kuuza billets katika eneo iliyo guswa na maambukizi mabadiliko kidogo ya ndege inaweza sababisha unazuwiwa session_86_4416_swc.wav,70261,epuka kuuza billets katika eneo iliyo guswa na maambukizi mabadiliko kidogo ya ndege inaweza sababisha unazuwiwa session_87_4929_swc.wav,76019,unaweza pia kucheleweshwa kwa muda ya saa mingi kisha kufika kwa sababu ya kupimwa homa na hatua zinazo husiana nazo ao hata kutengwa mbali na wengine kwa muda wa juma mbili session_87_4930_swc.wav,38291,kuwa tayari kwa mabadiliko ya mipango ya safari yako yenye inaweza tokea hasa kama unasafiri inje ya inchi session_86_4419_swc.wav,61333,kuwa tayari kwa mabadiliko ya mipango ya safari yako yenye inaweza tokea hasa kama unasafiri inje ya inchi session_87_4933_swc.wav,18131,kutengwa mbali na watu wengine na hatua za ndani session_86_4422_swc.wav,24373,kutengwa mbali na watu wengine na hatua za ndani session_87_4936_swc.wav,55571,zaidi ya hatua za serkali vituo vya watu binafsi vinafunga 
na kushimamisha matukio juu ya kujaribu kupunguza kusambarana kwa virusi session_86_4425_swc.wav,76981,zaidi ya hatua za serkali vituo vya watu binafsi vinafunga na kushimamisha matukio juu ya kujaribu kupunguza kusambarana kwa virusi session_87_4937_swc.wav,63539,huko china hatua za maana zinatofautiana sana kufatana na jimbo muji hata wilaya ao kijiji ziliwekwa kuhusu usafirishaji na shuruli session_87_4938_swc.wav,28979,serkali mingi zinatowa shauri ya kutosafiri kwenda china iyi wakati session_86_4427_swc.wav,50773,serkali mingi zinatowa shauri ya kutosafiri kwenda china iyi wakati session_87_4939_swc.wav,38099,kama hauna namna ingine jielimishe na upate habari za sasa kuhusu hali ya mahali kwenye unapatikana session_86_4428_swc.wav,61141,kama hauna namna ingine jielimishe na upate habari za sasa kuhusu hali ya mahali kwenye unapatikana session_87_4940_swc.wav,38291,watu ambao walitembelea jimbo ya hubei wanalazimishwa kuheshimia hatua kali zilizo chukuliwa session_86_4429_swc.wav,59413,watu ambao walitembelea jimbo ya hubei wanalazimishwa kuheshimia hatua kali zilizo chukuliwa session_87_4941_swc.wav,27059,mifano ya hatua mu maeneo mbalimbali za china iyi liste ni ya mufupi session_86_4430_swc.wav,48661,mifano ya hatua mu maeneo mbalimbali za china iyi liste ni ya mufupi session_87_4942_swc.wav,77651,kutengwa mbali na wengine kwa siku kumi na ine kujitengwa mwenyewe na watu wengine ao kipimo ya virusi ya corona wakati wakufika kutokea mu maeneo zingine za china ao inchi zingine kwa garama yako session_87_4943_swc.wav,50291,maswali kuhusu safari yako ao matatizo yako ya kiafya ya mbele kudanganya inaweza tuma unafungwa kwa miaka mingi ndani ya jela session_86_4432_swc.wav,68341,maswali kuhusu safari yako ao matatizo yako ya kiafya ya mbele kudanganya inaweza tuma unafungwa kwa miaka mingi ndani ya jela session_87_4944_swc.wav,41651,kulazimishwa kujiandikisha kupitia formulaire kwenye mtandao internet ao kupitia kartasi session_86_4433_swc.wav,55381,kulazimishwa kujiandikisha kupitia formulaire kwenye mtandao internet ao kupitia kartasi session_87_4945_swc.wav,23219,kulazimishwa kuvala mask ya kujikinga wakati uko kati ya watu wengi session_86_4434_swc.wav,39253,kulazimishwa kuvala mask ya kujikinga wakati uko kati ya watu wengi session_87_4946_swc.wav,65939,kukatazwa kwenda mahali kwenye kuko watu wengi kama vile fasi ya kufanyia shuruli za biashara kwa watu wenye walikuwa inje ya china mu siku kumi na ine zilizo pita session_87_4947_swc.wav,32531,kulipa kwa njia ya simu hapana franga ya kartasi ndani ya ma duka session_86_4436_swc.wav,34741,kulipa kwa njia ya simu hapana franga ya kartasi ndani ya ma duka session_87_4948_swc.wav,49331,huduma zinazo kataliwa kwa raia wa inchi zingine ndani ya ma restaurants kampuni za usafirishaji na huduma zingine session_86_4437_swc.wav,73141,huduma zinazo kataliwa kwa raia wa inchi zingine ndani ya ma restaurants kampuni za usafirishaji na huduma zingine session_87_4949_swc.wav,26099,kupunguza ao kusimamisha kwa namna mingi za usafirishaji session_86_4438_swc.wav,40693,kupunguza ao kusimamisha kwa namna mingi za usafirishaji session_87_4950_swc.wav,37811,kulazimishwa kujiandikisha ku jina yako ya kweli kwa kadi za usafiri ya watu wengi session_86_4439_swc.wav,47221,kulazimishwa kujiandikisha ku jina yako ya kweli kwa kadi za usafiri ya watu wengi session_87_4951_swc.wav,26291,kufungwa kwa muji yote kijiji ao muji mukubwa session_86_4440_swc.wav,34741,kufungwa kwa muji yote kijiji ao muji mukubwa session_87_4952_swc.wav,23699,kukatazwa kuingia mu ma 
lupango kwenye watu hawaishi session_86_4441_swc.wav,44821,kukatazwa kuingia mu ma lupango kwenye watu hawaishi session_87_4953_swc.wav,23411,ruhusa ya kwenda tu inje kila siku mbili juu ya kufanya shuruli session_86_4442_swc.wav,41653,ruhusa ya kwenda tu inje kila siku mbili juu ya kufanya shuruli session_87_4954_swc.wav,21011,kufunga ma kampuni na fasi za michezo na kukatazwa kufanya shuruli session_86_4443_swc.wav,42133,kufunga ma kampuni na fasi za michezo na kukatazwa kufanya shuruli session_87_4955_swc.wav,32051,kampuni zinazo hitaji ruhusa ya serkali juu ya kufungula tena session_86_4444_swc.wav,46453,kampuni zinazo hitaji ruhusa ya serkali juu ya kufungula tena session_87_4956_swc.wav,37811,kulazimishwa kuheshimia umbali ya kadiri kati ya wateja ndani ya ma fasi za kukukiliya kama zinafunguliwa session_86_4445_swc.wav,73813,kulazimishwa kuheshimia umbali ya kadiri kati ya wateja ndani ya ma fasi za kukukiliya kama zinafunguliwa session_87_4957_swc.wav,45299,uchuruzi wa mask ya kujikinga ya uwezo ya chini mpaka ya uwezo ya kawaida inaweza kuazibiwa kwa kifungo cha maisha session_86_4446_swc.wav,71221,uchuruzi wa mask ya kujikinga ya uwezo ya chini mpaka ya uwezo ya kawaida inaweza kuazibiwa kwa kifungo cha maisha session_87_4959_swc.wav,67859,lakini kampuni zingine zaidi ya maduka za kuuzisha chakula ma pharmacies zinafungwa na usafirishaji ya watu wengi zilipunguzwa ao kusimamishwa session_86_4448_swc.wav,78421,lakini kampuni zingine zaidi ya maduka za kuuzisha chakula ma pharmacies zinafungwa na usafirishaji ya watu wengi zilipunguzwa ao kusimamishwa session_87_4960_swc.wav,69491,mu inchi zenye ziliguswa sana na maambukizi kama vile italia uhispania na ufaransa hatua ya kujifungia mu ma nyumba iliwekwa kama vile china mwezi wa kwanza session_87_4961_swc.wav,31091,zaidi ya hayo umoja ya ulaya ue ilifunga mipaka yake ya inje session_86_4450_swc.wav,43861,zaidi ya hayo umoja ya ulaya ue ilifunga mipaka yake ya inje session_87_4962_swc.wav,25139,virusi ya corona inasambarana pia amerika ya kaskazini session_86_4451_swc.wav,34453,virusi ya corona inasambarana pia amerika ya kaskazini session_87_4964_swc.wav,55379,majimbo mingi miji na mitaa zilitoa maagizo ya kubakiya ku ma nyumba ambazo zinahusu kufungwa kwa ma kampuni zote zisizo kuwa za muhimu session_87_4965_swc.wav,63059,na safari za ndege mingi zilikataziwa maonyo iliyo tolewa na hatua zilizo chukuliwa kusafiri na ndege wakati ya muripuko ya virusi ya corona inaweza kuwa nguvu session_87_4966_swc.wav,16019,kuko safari zenye haziwezekani session_86_4455_swc.wav,44341,kuko safari zenye haziwezekani session_87_4967_swc.wav,48851,zingine zinahusu miunganisho zisizo kuwa za kawaida na escales mingi na kuchelewesha kwa muda mrefu kati ya safari za ndege session_86_4456_swc.wav,73333,zingine zinahusu miunganisho zisizo kuwa za kawaida na escales mingi na kuchelewesha kwa muda mrefu kati ya safari za ndege session_87_4968_swc.wav,31091,katika hali zingine iyi inamaanisha beyi kali sana ya billet tiketi session_86_4457_swc.wav,49813,katika hali zingine iyi inamaanisha beyi kali sana ya billet tiketi session_87_4969_swc.wav,65651,lakini habari njema ni kwamba safari mingi za ndege ziko beyi chini kuliko kawaida kwa sababu ya kupunguka kwa mahitaji na kuko uwezekano ya kuwa na kiti ya wazi karibu yako session_87_4970_swc.wav,78131,tayarisha muda zaidi kwa watu wenye unawasiliana nabo hasa wakati ya mabadiliko kati ya safari ya ndege ya kimataifa na safari ya ndege ya kitaifa na hasa kama safari yako ni kwenda mu inchi iliyo guswa sana na 
maambukizi session_87_4971_swc.wav,60659,uchunguzi kupima homa hatua za ziada na kungojea inayo husiana inaweza kukamata dakika ao ma saa mbele uruhusiwe kuendelea na safari yako session_87_4972_swc.wav,61811,inaweza kuwa nguvu zaidi kuliko kawaida kuuza billet tikiti ya kwenda ao kutoka eneo inayo onywa ao kutiliwa ma hatua session_87_4973_swc.wav,31091,iyi ni shida ya kweli kama unajaribu kutoka mu eneo iliyo guswa na maambukizi session_86_4462_swc.wav,33013,iyi ni shida ya kweli kama unajaribu kutoka mu eneo iliyo guswa na maambukizi session_87_4975_swc.wav,48659,kuuza billet ya safari ya iyi aina unaweza labda kuwasiliana na kampuni ya ndege ao kutumia njia isiyo julikana sana session_86_4464_swc.wav,72373,kuuza billet ya safari ya iyi aina unaweza labda kuwasiliana na kampuni ya ndege ao kutumia njia isiyo julikana sana session_87_4977_swc.wav,13811,mawasiliano inaweza kuwa shida session_86_4466_swc.wav,21781,mawasiliano inaweza kuwa shida session_87_4978_swc.wav,77171,hatari ya kushindwa kutoka ndani ya muji kwenye ulienda kwa sasa ni kubwa kuliko kawaida kwa sababu ya kuchelewa kwa uchunguzi na vipimo pamoja na hesabu kubwa ya safari zilizo katazwa session_87_4980_swc.wav,28979,safari zingine zinaweza sababisha kutengwa na watu wengine katikati ya safari session_86_4469_swc.wav,40213,safari zingine zinaweza sababisha kutengwa na watu wengine katikati ya safari session_87_4981_swc.wav,57299,sasa kama inawezekana reserver wekeza safari ya ndege ya moja kwa moja na kama haiko vile fikiria kwa uangalifu mahali kwenye unasafiri session_86_4470_swc.wav,79861,sasa kama inawezekana reserver wekeza safari ya ndege ya moja kwa moja na kama haiko vile fikiria kwa uangalifu mahali kwenye unasafiri session_87_4982_swc.wav,26099,wakati hakuna uhakika mipango inaweza kubadilika session_86_4471_swc.wav,38101,wakati hakuna uhakika mipango inaweza kubadilika session_87_4983_swc.wav,50579,consulats na ambassades za mingi zilisafirisha wafanyakazi wasiyo kuwa muhimu na wengine walishimamisha kazi zabo kabisa session_87_4984_swc.wav,43571,unapashwa kupata msaada wa haraka lakini unaweza kuwasiliana na consulat yako ya mbali kama ya karibu inafungwa session_86_4473_swc.wav,65653,unapashwa kupata msaada wa haraka lakini unaweza kuwasiliana na consulat yako ya mbali kama ya karibu inafungwa session_87_4986_swc.wav,63251,inaweza hata kukujulisha kuhusu hali ya fasi kwenye uko na kukujulisha juu ya kanuni na hatua za kusafiri zilizo kamatwa siku zilizo pita session_86_4475_swc.wav,77173,inaweza hata kukujulisha kuhusu hali ya fasi kwenye uko na kukujulisha juu ya kanuni na hatua za kusafiri zilizo kamatwa siku zilizo pita session_87_4987_swc.wav,60659,huduma za kawaida za ma consulats kama vile kutayarisha visa na passeports zinaweza kusimamishwa ao kupunguzwa kwa haraka kulingana na eneo na consulat session_86_4476_swc.wav,72181,huduma za kawaida za ma consulats kama vile kutayarisha visa na passeports zinaweza kusimamishwa ao kupunguzwa kwa haraka kulingana na eneo na consulat session_87_4989_swc.wav,52691,ni kitu ya kawaida kuita iyo ugonjwa kama magonjwa ya mafafa ao virusi ya wachina katika maeneo kama hong kong na taiwan session_86_4478_swc.wav,63733,ni kitu ya kawaida kuita iyo ugonjwa kama magonjwa ya mafafa ao virusi ya wachina katika maeneo kama hong kong na taiwan session_87_4992_swc.wav,24851,heshimia hizi maagizo na epuka vikundi ya watu session_86_4481_swc.wav,27253,heshimia hizi maagizo na epuka vikundi ya watu session_87_4993_swc.wav,67859,kufatana na muripuko ya virusi ya corona chuki kwa 
wageni xenophobie iliongezeka mu inchi mingi hasa lakini siyo tu kwa watu wanao onekana kuwa wachina session_87_4994_swc.wav,74579,mu dunia kote kunaonekana ongezeko ya matukio ya ubaguzi ya rangi kwa watu kutoka asia ya mashariki ikiwemo miji kubwa kama new york londres na san francisco session_87_4996_swc.wav,78899,viwango ya chuki kwa wageni xenophobie viliongezeka pia asia ya mashariki na ma restaurants na kampuni zingine zilikatala kuhudumia wateja wa kigeni japani na china session_87_4998_swc.wav,26099,hivi ni vyanzo vingine vya habari juu ya muripuko ya virusi ya corona session_86_4487_swc.wav,48373,hivi ni vyanzo vingine vya habari juu ya muripuko ya virusi ya corona session_87_4999_swc.wav,29171,vituo vya kuchunga na kuzuwia magonjwa vya serkali ya marekani session_86_4488_swc.wav,38773,vituo vya kuchunga na kuzuwia magonjwa vya serkali ya marekani session_87_5000_swc.wav,31379,kituo cha ulaya cha kuzuwia na kuchunguza magonjwa habari mupya siku kwa siku session_86_4489_swc.wav,45973,kituo cha ulaya cha kuzuwia na kuchunguza magonjwa habari mupya siku kwa siku session_87_5001_swc.wav,41459,serkali ya uingereza ushauri kwa wasafiri kutoka ministere inayo husika na mambo ya inje na pia ya commonwealth session_86_4490_swc.wav,58933,serkali ya uingereza ushauri kwa wasafiri kutoka ministere inayo husika na mambo ya inje na pia ya commonwealth session_87_5002_swc.wav,61331,carte na tableau ya muongozo kwenye mtandao ya kituo ya sayansi na ufundi cha chuo kikuu cha johns hopkins na habari mupya moja kwa moja session_87_5003_swc.wav,28499,liste ya hatua mbalimbali za kuingia za iata na za new york times session_86_4492_swc.wav,42901,liste ya hatua mbalimbali za kuingia za iata na za new york times session_87_5004_swc.wav,79859,kuko habari mingi za uwongo na vizingishio juu ya virusi zenye zinasemakana kwenye mitandao na hata na wafanyakazi wa serkali kwa hivyo kuwa mwangalifu na njia zenye unatumia kwa kupata habari session_87_5005_swc.wav,46259,hakikisha kama habari zote na mashauri yenye unapata zinahakikishwa na waganga na wanasayansi ambao wanajulikana muzuri session_86_4494_swc.wav,67093,hakikisha kama habari zote na mashauri yenye unapata zinahakikishwa na waganga na wanasayansi ambao wanajulikana muzuri
0
coqui_public_repos/STT/tests
coqui_public_repos/STT/tests/test_data/validate_locale_fra.py
def validate_label(label): return label
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/shortest-path.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions to find shortest paths in an FST. #ifndef FST_SHORTEST_PATH_H_ #define FST_SHORTEST_PATH_H_ #include <functional> #include <type_traits> #include <utility> #include <vector> #include <fst/log.h> #include <fst/cache.h> #include <fst/determinize.h> #include <fst/queue.h> #include <fst/shortest-distance.h> #include <fst/test-properties.h> namespace fst { template <class Arc, class Queue, class ArcFilter> struct ShortestPathOptions : public ShortestDistanceOptions<Arc, Queue, ArcFilter> { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; int32 nshortest; // Returns n-shortest paths. bool unique; // Only returns paths with distinct input strings. bool has_distance; // Distance vector already contains the // shortest distance from the initial state. bool first_path; // Single shortest path stops after finding the first // path to a final state; that path is the shortest path // only when: // (1) using the ShortestFirstQueue with all the weights // in the FST being between One() and Zero() according to // NaturalLess or when // (2) using the NaturalAStarQueue with an admissible // and consistent estimate. Weight weight_threshold; // Pruning weight threshold. StateId state_threshold; // Pruning state threshold. ShortestPathOptions(Queue *queue, ArcFilter filter, int32 nshortest = 1, bool unique = false, bool has_distance = false, float delta = kShortestDelta, bool first_path = false, Weight weight_threshold = Weight::Zero(), StateId state_threshold = kNoStateId) : ShortestDistanceOptions<Arc, Queue, ArcFilter>(queue, filter, kNoStateId, delta), nshortest(nshortest), unique(unique), has_distance(has_distance), first_path(first_path), weight_threshold(std::move(weight_threshold)), state_threshold(state_threshold) {} }; namespace internal { constexpr size_t kNoArc = -1; // Helper function for SingleShortestPath building the shortest path as a left- // to-right machine backwards from the best final state. It takes the input // FST passed to SingleShortestPath and the parent vector and f_parent returned // by that function, and builds the result into the provided output mutable FS // This is not normally called by users; see ShortestPath instead. template <class Arc> void SingleShortestPathBacktrace( const Fst<Arc> &ifst, MutableFst<Arc> *ofst, const std::vector<std::pair<typename Arc::StateId, size_t>> &parent, typename Arc::StateId f_parent) { using StateId = typename Arc::StateId; ofst->DeleteStates(); ofst->SetInputSymbols(ifst.InputSymbols()); ofst->SetOutputSymbols(ifst.OutputSymbols()); StateId s_p = kNoStateId; StateId d_p = kNoStateId; for (StateId state = f_parent, d = kNoStateId; state != kNoStateId; d = state, state = parent[state].first) { d_p = s_p; s_p = ofst->AddState(); if (d == kNoStateId) { ofst->SetFinal(s_p, ifst.Final(f_parent)); } else { ArcIterator<Fst<Arc>> aiter(ifst, state); aiter.Seek(parent[d].second); auto arc = aiter.Value(); arc.nextstate = d_p; ofst->AddArc(s_p, arc); } } ofst->SetStart(s_p); if (ifst.Properties(kError, false)) ofst->SetProperties(kError, kError); ofst->SetProperties( ShortestPathProperties(ofst->Properties(kFstProperties, false), true), kFstProperties); } // Helper function for SingleShortestPath building a tree of shortest paths to // every final state in the input FST. 
It takes the input FST and parent values // computed by SingleShortestPath and builds into the output mutable FST the // subtree of ifst that consists only of the best paths to all final states. // This is not normally called by users; see ShortestPath instead. template <class Arc> void SingleShortestTree( const Fst<Arc> &ifst, MutableFst<Arc> *ofst, const std::vector<std::pair<typename Arc::StateId, size_t>> &parent) { ofst->DeleteStates(); ofst->SetInputSymbols(ifst.InputSymbols()); ofst->SetOutputSymbols(ifst.OutputSymbols()); ofst->SetStart(ifst.Start()); for (StateIterator<Fst<Arc>> siter(ifst); !siter.Done(); siter.Next()) { ofst->AddState(); ofst->SetFinal(siter.Value(), ifst.Final(siter.Value())); } for (const auto &pair : parent) { if (pair.first != kNoStateId && pair.second != kNoArc) { ArcIterator<Fst<Arc>> aiter(ifst, pair.first); aiter.Seek(pair.second); ofst->AddArc(pair.first, aiter.Value()); } } if (ifst.Properties(kError, false)) ofst->SetProperties(kError, kError); ofst->SetProperties( ShortestPathProperties(ofst->Properties(kFstProperties, false), true), kFstProperties); } // Implements the stopping criterion when ShortestPathOptions::first_path // is set to true: // operator()(s, d, f) == true // iff every successful path through state 's' has a cost greater or equal // to 'f' under the assumption that 'd' is the shortest distance to state 's'. // Correct when using the ShortestFirstQueue with all the weights in the FST // being between One() and Zero() according to NaturalLess template <typename S, typename W, typename Queue> struct FirstPathSelect { FirstPathSelect(const Queue &) {} bool operator()(S s, W d, W f) const { return f == Plus(d, f); } }; // Specialisation for A*. // Correct when the estimate is admissible and consistent. template <typename S, typename W, typename Estimate> class FirstPathSelect<S, W, NaturalAStarQueue<S, W, Estimate>> { public: using Queue = NaturalAStarQueue<S, W, Estimate>; FirstPathSelect(const Queue &state_queue) : estimate_(state_queue.GetCompare().GetEstimate()) {} bool operator()(S s, W d, W f) const { return f == Plus(Times(d, estimate_(s)), f); } private: const Estimate &estimate_; }; // Shortest-path algorithm. It builds the output mutable FST so that it contains // the shortest path in the input FST; distance returns the shortest distances // from the source state to each state in the input FST, and the options struct // is // used to specify options such as the queue discipline, the arc filter and // delta. The super_final option is an output parameter indicating the final // state, and the parent argument is used for the storage of the backtrace path // for each state 1 to n, (i.e., the best previous state and the arc that // transition to state n.) The shortest path is the lowest weight path w.r.t. // the natural semiring order. The weights need to be right distributive and // have the path (kPath) property. False is returned if an error is encountered. // // This is not normally called by users; see ShortestPath instead (with n = 1). 
template <class Arc, class Queue, class ArcFilter> bool SingleShortestPath( const Fst<Arc> &ifst, std::vector<typename Arc::Weight> *distance, const ShortestPathOptions<Arc, Queue, ArcFilter> &opts, typename Arc::StateId *f_parent, std::vector<std::pair<typename Arc::StateId, size_t>> *parent) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; static_assert(IsPath<Weight>::value, "Weight must have path property."); static_assert((Weight::Properties() & kRightSemiring) == kRightSemiring, "Weight must be right distributive."); parent->clear(); *f_parent = kNoStateId; if (ifst.Start() == kNoStateId) return true; std::vector<bool> enqueued; auto state_queue = opts.state_queue; const auto source = (opts.source == kNoStateId) ? ifst.Start() : opts.source; bool final_seen = false; auto f_distance = Weight::Zero(); distance->clear(); state_queue->Clear(); while (distance->size() < source) { distance->push_back(Weight::Zero()); enqueued.push_back(false); parent->push_back(std::make_pair(kNoStateId, kNoArc)); } distance->push_back(Weight::One()); parent->push_back(std::make_pair(kNoStateId, kNoArc)); state_queue->Enqueue(source); enqueued.push_back(true); while (!state_queue->Empty()) { const auto s = state_queue->Head(); state_queue->Dequeue(); enqueued[s] = false; const auto sd = (*distance)[s]; // If we are using a shortest queue, no other path is going to be shorter // than f_distance at this point. using FirstPath = FirstPathSelect<StateId, Weight, Queue>; if (opts.first_path && final_seen && FirstPath(*state_queue)(s, sd, f_distance)) { break; } if (ifst.Final(s) != Weight::Zero()) { const auto plus = Plus(f_distance, Times(sd, ifst.Final(s))); if (f_distance != plus) { f_distance = plus; *f_parent = s; } if (!f_distance.Member()) return false; final_seen = true; } for (ArcIterator<Fst<Arc>> aiter(ifst, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); while (distance->size() <= arc.nextstate) { distance->push_back(Weight::Zero()); enqueued.push_back(false); parent->push_back(std::make_pair(kNoStateId, kNoArc)); } auto &nd = (*distance)[arc.nextstate]; const auto weight = Times(sd, arc.weight); if (nd != Plus(nd, weight)) { nd = Plus(nd, weight); if (!nd.Member()) return false; (*parent)[arc.nextstate] = std::make_pair(s, aiter.Position()); if (!enqueued[arc.nextstate]) { state_queue->Enqueue(arc.nextstate); enqueued[arc.nextstate] = true; } else { state_queue->Update(arc.nextstate); } } } } return true; } template <class StateId, class Weight> class ShortestPathCompare { public: ShortestPathCompare(const std::vector<std::pair<StateId, Weight>> &pairs, const std::vector<Weight> &distance, StateId superfinal, float delta) : pairs_(pairs), distance_(distance), superfinal_(superfinal), delta_(delta) {} bool operator()(const StateId x, const StateId y) const { const auto &px = pairs_[x]; const auto &py = pairs_[y]; const auto wx = Times(PWeight(px.first), px.second); const auto wy = Times(PWeight(py.first), py.second); // Penalize complete paths to ensure correct results with inexact weights. // This forms a strict weak order so long as ApproxEqual(a, b) => // ApproxEqual(a, c) for all c s.t. less_(a, c) && less_(c, b). 
if (px.first == superfinal_ && py.first != superfinal_) { return less_(wy, wx) || ApproxEqual(wx, wy, delta_); } else if (py.first == superfinal_ && px.first != superfinal_) { return less_(wy, wx) && !ApproxEqual(wx, wy, delta_); } else { return less_(wy, wx); } } private: Weight PWeight(StateId state) const { return (state == superfinal_) ? Weight::One() : (state < distance_.size()) ? distance_[state] : Weight::Zero(); } const std::vector<std::pair<StateId, Weight>> &pairs_; const std::vector<Weight> &distance_; const StateId superfinal_; const float delta_; NaturalLess<Weight> less_; }; // N-Shortest-path algorithm: implements the core n-shortest path algorithm. // The output is built reversed. See below for versions with more options and // *not reversed*. // // The output mutable FST contains the REVERSE of the n-shortest paths in the input // FST; distance must contain the shortest distance from each state to a final // state in the input FST; delta is the convergence delta. // // The n-shortest paths are the n-lowest weight paths w.r.t. the natural // semiring order. The single path that can be read from the ith of at most n // transitions leaving the initial state of the input FST is the ith shortest // path. Disregarding the initial state and initial transitions, the // n-shortest paths, in fact, form a tree rooted at the single final state. // // The weights need to be left and right distributive (kSemiring) and have the // path (kPath) property. // // Arc weights must satisfy the property that the sum of the weights of one or // more paths from some state S to T is never Zero(). In particular, arc weights // are never Zero(). // // For more information, see: // // Mohri, M, and Riley, M. 2002. An efficient algorithm for the n-best-strings // problem. In Proc. ICSLP. // // The algorithm relies on the shortest-distance algorithm. There are some // issues with the pseudo-code as written in the paper (viz., line 11). // // IMPLEMENTATION NOTE: The input FST can be a delayed FST and at any state in // its expansion the values of distance vector need only be defined at that time // for the states that are known to exist. template <class Arc, class RevArc> void NShortestPath(const Fst<RevArc> &ifst, MutableFst<Arc> *ofst, const std::vector<typename Arc::Weight> &distance, int32 nshortest, float delta = kShortestDelta, typename Arc::Weight weight_threshold = Arc::Weight::Zero(), typename Arc::StateId state_threshold = kNoStateId) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using Pair = std::pair<StateId, Weight>; static_assert((Weight::Properties() & kPath) == kPath, "Weight must have path property."); static_assert((Weight::Properties() & kSemiring) == kSemiring, "Weight must be distributive."); if (nshortest <= 0) return; ofst->DeleteStates(); ofst->SetInputSymbols(ifst.InputSymbols()); ofst->SetOutputSymbols(ifst.OutputSymbols()); // Each state in ofst corresponds to a path with weight w from the initial // state of ifst to a state s in ifst, that can be characterized by a pair // (s, w). The vector pairs maps each state in ofst to the // corresponding pair (s, w). std::vector<Pair> pairs; // The superfinal state is denoted by kNoStateId. The distance from the // superfinal state to the final state is semiring One, so // `distance[kNoStateId]` is not needed.
const ShortestPathCompare<StateId, Weight> compare(pairs, distance, kNoStateId, delta); const NaturalLess<Weight> less; if (ifst.Start() == kNoStateId || distance.size() <= ifst.Start() || distance[ifst.Start()] == Weight::Zero() || less(weight_threshold, Weight::One()) || state_threshold == 0) { if (ifst.Properties(kError, false)) ofst->SetProperties(kError, kError); return; } ofst->SetStart(ofst->AddState()); const auto final_state = ofst->AddState(); ofst->SetFinal(final_state, Weight::One()); while (pairs.size() <= final_state) { pairs.push_back(std::make_pair(kNoStateId, Weight::Zero())); } pairs[final_state] = std::make_pair(ifst.Start(), Weight::One()); std::vector<StateId> heap; heap.push_back(final_state); const auto limit = Times(distance[ifst.Start()], weight_threshold); // r[s + 1], s state in fst, is the number of states in ofst which // corresponding pair contains s, i.e., it is number of paths computed so far // to s. Valid for s == kNoStateId (the superfinal state). std::vector<int> r; while (!heap.empty()) { std::pop_heap(heap.begin(), heap.end(), compare); const auto state = heap.back(); const auto p = pairs[state]; heap.pop_back(); const auto d = (p.first == kNoStateId) ? Weight::One() : (p.first < distance.size()) ? distance[p.first] : Weight::Zero(); if (less(limit, Times(d, p.second)) || (state_threshold != kNoStateId && ofst->NumStates() >= state_threshold)) { continue; } while (r.size() <= p.first + 1) r.push_back(0); ++r[p.first + 1]; if (p.first == kNoStateId) { ofst->AddArc(ofst->Start(), Arc(0, 0, Weight::One(), state)); } if ((p.first == kNoStateId) && (r[p.first + 1] == nshortest)) break; if (r[p.first + 1] > nshortest) continue; if (p.first == kNoStateId) continue; for (ArcIterator<Fst<RevArc>> aiter(ifst, p.first); !aiter.Done(); aiter.Next()) { const auto &rarc = aiter.Value(); Arc arc(rarc.ilabel, rarc.olabel, rarc.weight.Reverse(), rarc.nextstate); const auto weight = Times(p.second, arc.weight); const auto next = ofst->AddState(); pairs.push_back(std::make_pair(arc.nextstate, weight)); arc.nextstate = state; ofst->AddArc(next, arc); heap.push_back(next); std::push_heap(heap.begin(), heap.end(), compare); } const auto final_weight = ifst.Final(p.first).Reverse(); if (final_weight != Weight::Zero()) { const auto weight = Times(p.second, final_weight); const auto next = ofst->AddState(); pairs.push_back(std::make_pair(kNoStateId, weight)); ofst->AddArc(next, Arc(0, 0, final_weight, state)); heap.push_back(next); std::push_heap(heap.begin(), heap.end(), compare); } } Connect(ofst); if (ifst.Properties(kError, false)) ofst->SetProperties(kError, kError); ofst->SetProperties( ShortestPathProperties(ofst->Properties(kFstProperties, false)), kFstProperties); } } // namespace internal // N-Shortest-path algorithm: this version allows finer control via the options // argument. See below for a simpler interface. The output mutable FST contains // the n-shortest paths in the input FST; the distance argument is used to // return the shortest distances from the source state to each state in the // input FST, and the options struct is used to specify the number of paths to // return, whether they need to have distinct input strings, the queue // discipline, the arc filter and the convergence delta. // // The n-shortest paths are the n-lowest weight paths w.r.t. the natural // semiring order. The single path that can be read from the ith of at most n // transitions leaving the initial state of the output FST is the ith shortest // path. 
// Disregarding the initial state and initial transitions, The n-shortest paths, // in fact, form a tree rooted at the single final state. // // The weights need to be right distributive and have the path (kPath) property. // They need to be left distributive as well for nshortest > 1. // // For more information, see: // // Mohri, M, and Riley, M. 2002. An efficient algorithm for the n-best-strings // problem. In Proc. ICSLP. // // The algorithm relies on the shortest-distance algorithm. There are some // issues with the pseudo-code as written in the paper (viz., line 11). template <class Arc, class Queue, class ArcFilter, typename std::enable_if<IsPath<typename Arc::Weight>::value>::type * = nullptr> void ShortestPath(const Fst<Arc> &ifst, MutableFst<Arc> *ofst, std::vector<typename Arc::Weight> *distance, const ShortestPathOptions<Arc, Queue, ArcFilter> &opts) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using RevArc = ReverseArc<Arc>; if (opts.nshortest == 1) { std::vector<std::pair<StateId, size_t>> parent; StateId f_parent; if (internal::SingleShortestPath(ifst, distance, opts, &f_parent, &parent)) { internal::SingleShortestPathBacktrace(ifst, ofst, parent, f_parent); } else { ofst->SetProperties(kError, kError); } return; } if (opts.nshortest <= 0) return; if (!opts.has_distance) { ShortestDistance(ifst, distance, opts); if (distance->size() == 1 && !(*distance)[0].Member()) { ofst->SetProperties(kError, kError); return; } } // Algorithm works on the reverse of 'fst'; 'distance' is the distance to the // final state in 'rfst', 'ofst' is built as the reverse of the tree of // n-shortest path in 'rfst'. VectorFst<RevArc> rfst; Reverse(ifst, &rfst); auto d = Weight::Zero(); for (ArcIterator<VectorFst<RevArc>> aiter(rfst, 0); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); const auto state = arc.nextstate - 1; if (state < distance->size()) { d = Plus(d, Times(arc.weight.Reverse(), (*distance)[state])); } } // TODO(kbg): Avoid this expensive vector operation. distance->insert(distance->begin(), d); if (!opts.unique) { internal::NShortestPath(rfst, ofst, *distance, opts.nshortest, opts.delta, opts.weight_threshold, opts.state_threshold); } else { std::vector<Weight> ddistance; DeterminizeFstOptions<RevArc> dopts(opts.delta); DeterminizeFst<RevArc> dfst(rfst, distance, &ddistance, dopts); internal::NShortestPath(dfst, ofst, ddistance, opts.nshortest, opts.delta, opts.weight_threshold, opts.state_threshold); } // TODO(kbg): Avoid this expensive vector operation. distance->erase(distance->begin()); } template <class Arc, class Queue, class ArcFilter, typename std::enable_if<!IsPath<typename Arc::Weight>::value>::type * = nullptr> void ShortestPath(const Fst<Arc> &, MutableFst<Arc> *ofst, std::vector<typename Arc::Weight> *, const ShortestPathOptions<Arc, Queue, ArcFilter> &) { FSTERROR() << "ShortestPath: Weight needs to have the " << "path property and be distributive: " << Arc::Weight::Type(); ofst->SetProperties(kError, kError); } // Shortest-path algorithm: simplified interface. See above for a version that // allows finer control. The output mutable FST contains the n-shortest paths // in the input FST. The queue discipline is automatically selected. When unique // is true, only paths with distinct input label sequences are returned. // // The n-shortest paths are the n-lowest weight paths w.r.t. the natural // semiring order. 
The single path that can be read from the ith of at most n // transitions leaving the initial state of the output FST is the ith best path. // The weights need to be right distributive and have the path (kPath) property. template <class Arc> void ShortestPath(const Fst<Arc> &ifst, MutableFst<Arc> *ofst, int32 nshortest = 1, bool unique = false, bool first_path = false, typename Arc::Weight weight_threshold = Arc::Weight::Zero(), typename Arc::StateId state_threshold = kNoStateId, float delta = kShortestDelta) { using StateId = typename Arc::StateId; std::vector<typename Arc::Weight> distance; AnyArcFilter<Arc> arc_filter; AutoQueue<StateId> state_queue(ifst, &distance, arc_filter); const ShortestPathOptions<Arc, AutoQueue<StateId>, AnyArcFilter<Arc>> opts( &state_queue, arc_filter, nshortest, unique, false, delta, first_path, weight_threshold, state_threshold); ShortestPath(ifst, ofst, &distance, opts); } } // namespace fst #endif // FST_SHORTEST_PATH_H_
0
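The ShortestPath comments above describe a relaxation loop over a queue, a distance vector, and a parent backtrace, all phrased in semiring terms. Purely as an illustration of that idea (not the OpenFST API), the sketch below specializes it to the tropical (min, +) semiring; the graph encoding and the function name are invented for the example.

# Minimal sketch (not the OpenFST API): single shortest path over the
# tropical (min, +) semiring, mirroring the relaxation loop described in
# SingleShortestPath above. "fst" is a hypothetical dict:
#   state -> list of (weight, next_state); "finals" maps state -> final weight.
import heapq

def single_shortest_path(fst, start, finals):
    dist = {start: 0.0}      # shortest distance found so far
    parent = {start: None}   # backtrace, playing the role of the 'parent' vector above
    queue = [(0.0, start)]   # analogue of a ShortestFirstQueue
    best_final, best_cost = None, float("inf")
    while queue:
        d, s = heapq.heappop(queue)
        if d > dist.get(s, float("inf")):
            continue  # stale queue entry
        if s in finals and d + finals[s] < best_cost:
            best_cost, best_final = d + finals[s], s
        for w, ns in fst.get(s, []):
            nd = d + w  # Times() in the tropical semiring
            if nd < dist.get(ns, float("inf")):  # Plus() improves the estimate
                dist[ns], parent[ns] = nd, s
                heapq.heappush(queue, (nd, ns))
    # Backtrace from the best final state, like SingleShortestPathBacktrace.
    path = []
    s = best_final
    while s is not None:
        path.append(s)
        s = parent[s]
    return best_cost, list(reversed(path))

# Example: two paths from state 0 to the final state 3; the cheaper one wins.
fst = {0: [(1.0, 1), (4.0, 2)], 1: [(1.0, 3)], 2: [(0.5, 3)]}
print(single_shortest_path(fst, 0, {3: 0.0}))  # (2.0, [0, 1, 3])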
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/pdt/getters.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/extensions/pdt/getters.h> namespace fst { namespace script { bool GetPdtComposeFilter(const string &str, PdtComposeFilter *cf) { if (str == "expand") { *cf = EXPAND_FILTER; } else if (str == "expand_paren") { *cf = EXPAND_PAREN_FILTER; } else if (str == "paren") { *cf = PAREN_FILTER; } else { return false; } return true; } bool GetPdtParserType(const string &str, PdtParserType *pt) { if (str == "left") { *pt = PDT_LEFT_PARSER; } else if (str == "left_sr") { *pt = PDT_LEFT_SR_PARSER; } else { return false; } return true; } } // namespace script } // namespace fst
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/script/connect.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/script/connect.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void Connect(MutableFstClass *fst) { Apply<Operation<MutableFstClass>>("Connect", fst->ArcType(), fst); } REGISTER_FST_OPERATION(Connect, StdArc, MutableFstClass); REGISTER_FST_OPERATION(Connect, LogArc, MutableFstClass); REGISTER_FST_OPERATION(Connect, Log64Arc, MutableFstClass); } // namespace script } // namespace fst
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/util/usage.hh
#ifndef UTIL_USAGE_H #define UTIL_USAGE_H #include <cstddef> #include <iosfwd> #include <string> #include <stdint.h> namespace util { // Time in seconds since process started. Zero on unsupported platforms. double WallTime(); // User + system time, process-wide. double CPUTime(); // User + system time, thread-specific. double ThreadTime(); // Resident usage in bytes. uint64_t RSSMax(); void PrintUsage(std::ostream &to); // Determine how much physical memory there is. Return 0 on failure. uint64_t GuessPhysicalMemory(); // Parse a size like unix sort. Sadly, this means the default multiplier is K. uint64_t ParseSize(const std::string &arg); } // namespace util #endif // UTIL_USAGE_H
0
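The ParseSize declaration above notes that it follows the unix sort convention, where a bare number defaults to the K multiplier. The snippet below is only a rough Python sketch of that documented convention, not the kenlm implementation; the function name is invented for the example.

# Sketch of sort(1)-style size parsing as described above; illustrative only.
def parse_size(arg: str) -> int:
    units = {"K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}
    arg = arg.strip()
    if arg and arg[-1].upper() in units:
        return int(float(arg[:-1]) * units[arg[-1].upper()])
    return int(float(arg)) * units["K"]  # bare numbers default to K, as noted above

print(parse_size("500"))  # 512000
print(parse_size("1G"))   # 1073741824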
coqui_public_repos
coqui_public_repos/STT/transcribe.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function if __name__ == "__main__": print( "Using the top level transcribe.py script is deprecated and will be removed " "in a future release. Instead use: python -m coqui_stt_training.transcribe" ) try: from coqui_stt_training import transcribe as stt_transcribe except ImportError: print("Training package is not installed. See training documentation.") raise stt_transcribe.main()
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/decoder-build.sh
#!/bin/bash set -xe source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then export SYSTEM_TARGET=host-win else export SYSTEM_TARGET=host fi; do_deepspeech_decoder_build
0
coqui_public_repos/STT
coqui_public_repos/STT/bin/import_voxforge.py
#!/usr/bin/env python import codecs import os import re import sys import tarfile import threading import unicodedata import urllib from glob import glob from multiprocessing.pool import ThreadPool from os import makedirs, path import pandas from bs4 import BeautifulSoup from coqui_stt_training.util.downloader import maybe_download from tensorflow.python.platform import gfile """The number of jobs to run in parallel""" NUM_PARALLEL = 8 """Lambda function returns the filename of a path""" filename_of = lambda x: path.split(x)[1] class AtomicCounter(object): """A class that atomically increments a counter""" def __init__(self, start_count=0): """Initialize the counter :param start_count: the number to start counting at """ self.__lock = threading.Lock() self.__count = start_count def increment(self, amount=1): """Increments the counter by the given amount :param amount: the amount to increment by (default 1) :return: the incremented value of the counter """ self.__lock.acquire() self.__count += amount v = self.value() self.__lock.release() return v def value(self): """Returns the current value of the counter (not atomic)""" return self.__count def _parallel_downloader(voxforge_url, archive_dir, total, counter): """Generate a function to download a file based on given parameters This works by currying the above given arguments into a closure in the form of the following function. :param voxforge_url: the base voxforge URL :param archive_dir: the location to store the downloaded file :param total: the total number of files to download :param counter: an atomic counter to keep track of # of downloaded files :return: a function that actually downloads a file given these params """ def download(d): """Binds voxforge_url, archive_dir, total, and counter into this scope Downloads the given file :param d: a tuple consisting of (index, file) where index is the index of the file to download and file is the name of the file to download """ (i, file) = d download_url = voxforge_url + "/" + file c = counter.increment() print("Downloading file {} ({}/{})...".format(i + 1, c, total)) maybe_download(filename_of(download_url), archive_dir, download_url) return download def _parallel_extracter(data_dir, number_of_test, number_of_dev, total, counter): """Generate a function to extract a tar file based on given parameters This works by currying the above given arguments into a closure in the form of the following function. 
:param data_dir: the target directory to extract into :param number_of_test: the number of files to keep as the test set :param number_of_dev: the number of files to keep as the dev set :param total: the total number of files to extract :param counter: an atomic counter to keep track of # of extracted files :return: a function that actually extracts a tar file given these params """ def extract(d): """Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope Extracts the given file :param d: a tuple consisting of (index, file) where index is the index of the file to extract and file is the name of the file to extract """ (i, archive) = d if i < number_of_test: dataset_dir = path.join(data_dir, "test") elif i < number_of_test + number_of_dev: dataset_dir = path.join(data_dir, "dev") else: dataset_dir = path.join(data_dir, "train") if not gfile.Exists( os.path.join(dataset_dir, ".".join(filename_of(archive).split(".")[:-1])) ): c = counter.increment() print("Extracting file {} ({}/{})...".format(i + 1, c, total)) tar = tarfile.open(archive) tar.extractall(dataset_dir) tar.close() return extract def _download_and_preprocess_data(data_dir): # Conditionally download data to data_dir if not path.isdir(data_dir): makedirs(data_dir) archive_dir = data_dir + "/archive" if not path.isdir(archive_dir): makedirs(archive_dir) print( "Downloading Voxforge data set into {} if not already present...".format( archive_dir ) ) voxforge_url = "http://www.repository.voxforge1.org/downloads/SpeechCorpus/Trunk/Audio/Main/16kHz_16bit" html_page = urllib.request.urlopen(voxforge_url) soup = BeautifulSoup(html_page, "html.parser") # list all links refs = [l["href"] for l in soup.find_all("a") if ".tgz" in l["href"]] # download files in parallel print("{} files to download".format(len(refs))) downloader = _parallel_downloader( voxforge_url, archive_dir, len(refs), AtomicCounter() ) p = ThreadPool(NUM_PARALLEL) p.map(downloader, enumerate(refs)) # Conditionally extract data to dataset_dir if not path.isdir(os.path.join(data_dir, "test")): makedirs(os.path.join(data_dir, "test")) if not path.isdir(os.path.join(data_dir, "dev")): makedirs(os.path.join(data_dir, "dev")) if not path.isdir(os.path.join(data_dir, "train")): makedirs(os.path.join(data_dir, "train")) tarfiles = glob(os.path.join(archive_dir, "*.tgz")) number_of_files = len(tarfiles) number_of_test = number_of_files // 100 number_of_dev = number_of_files // 100 # extract tars in parallel print( "Extracting Voxforge data set into {} if not already present...".format( data_dir ) ) extracter = _parallel_extracter( data_dir, number_of_test, number_of_dev, len(tarfiles), AtomicCounter() ) p.map(extracter, enumerate(tarfiles)) # Generate data set print("Generating Voxforge data set into {}".format(data_dir)) test_files = _generate_dataset(data_dir, "test") dev_files = _generate_dataset(data_dir, "dev") train_files = _generate_dataset(data_dir, "train") # Write sets to disk as CSV files train_files.to_csv(os.path.join(data_dir, "voxforge-train.csv"), index=False) dev_files.to_csv(os.path.join(data_dir, "voxforge-dev.csv"), index=False) test_files.to_csv(os.path.join(data_dir, "voxforge-test.csv"), index=False) def _generate_dataset(data_dir, data_set): extracted_dir = path.join(data_dir, data_set) files = [] for promts_file in glob(os.path.join(extracted_dir + "/*/etc/", "PROMPTS")): if path.isdir(os.path.join(promts_file[:-11], "wav")): with codecs.open(promts_file, "r", "utf-8") as f: for line in f: id = line.split(" ")[0].split("/")[-1] 
sentence = " ".join(line.split(" ")[1:]) sentence = re.sub("[^a-z']", " ", sentence.strip().lower()) transcript = "" for token in sentence.split(" "): word = token.strip() if word != "" and word != " ": transcript += word + " " transcript = ( unicodedata.normalize("NFKD", transcript.strip()) .encode("ascii", "ignore") .decode("ascii", "ignore") ) wav_file = path.join(promts_file[:-11], "wav/" + id + ".wav") if gfile.Exists(wav_file): wav_filesize = path.getsize(wav_file) # remove audios that are shorter than 0.5s and longer than 20s. # remove audios that are too short for transcript. if ( (wav_filesize / 32000) > 0.5 and (wav_filesize / 32000) < 20 and transcript != "" and wav_filesize / len(transcript) > 1400 ): files.append( (os.path.abspath(wav_file), wav_filesize, transcript) ) return pandas.DataFrame( data=files, columns=["wav_filename", "wav_filesize", "transcript"] ) if __name__ == "__main__": _download_and_preprocess_data(sys.argv[1])
0
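The voxforge importer above builds its parallel download and extract workers by currying shared state (base URL, target directory, total count, an AtomicCounter) into closures and mapping them over a ThreadPool. The following tiny self-contained sketch shows the same pattern; the worker and class names here are made up for illustration and only the pattern matches the script.

# Currying + ThreadPool pattern used by _parallel_downloader/_parallel_extracter.
import threading
from multiprocessing.pool import ThreadPool

class Counter:
    """Same idea as AtomicCounter above: a lock-protected running count."""
    def __init__(self):
        self._lock = threading.Lock()
        self._count = 0
    def increment(self):
        with self._lock:
            self._count += 1
            return self._count

def make_worker(prefix, total, counter):
    # Curry prefix/total/counter into the closure, as the script does with
    # voxforge_url, archive_dir, total and counter.
    def work(item):
        i, name = item
        print(f"{prefix} {name} ({counter.increment()}/{total})")
    return work

files = ["a.tgz", "b.tgz", "c.tgz"]
with ThreadPool(2) as pool:
    pool.map(make_worker("Processing", len(files), Counter()), enumerate(files))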
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/tts_tests2/test_glow_tts_d-vectors_train.py
import glob import json import os import shutil from trainer import get_last_checkpoint from tests import get_device_id, get_tests_output_path, run_cli from TTS.tts.configs.glow_tts_config import GlowTTSConfig config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") config = GlowTTSConfig( batch_size=2, eval_batch_size=8, num_loader_workers=0, num_eval_loader_workers=0, text_cleaner="english_cleaners", use_phonemes=True, phoneme_language="en-us", phoneme_cache_path="tests/data/ljspeech/phoneme_cache/", run_eval=True, test_delay_epochs=-1, epochs=1, print_step=1, print_eval=True, test_sentences=[ "Be a voice, not an echo.", ], data_dep_init_steps=1.0, use_speaker_embedding=False, use_d_vector_file=True, d_vector_file="tests/data/ljspeech/speakers.json", d_vector_dim=256, ) config.audio.do_trim_silence = True config.audio.trim_db = 60 config.save_json(config_path) # train the model for one epoch command_train = ( f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} " f"--coqpit.output_path {output_path} " "--coqpit.datasets.0.formatter ljspeech_test " "--coqpit.datasets.0.meta_file_train metadata.csv " "--coqpit.datasets.0.meta_file_val metadata.csv " "--coqpit.datasets.0.path tests/data/ljspeech " "--coqpit.datasets.0.meta_file_attn_mask tests/data/ljspeech/metadata_attn_mask.txt " "--coqpit.test_delay_epochs 0" ) run_cli(command_train) # Find latest folder continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) # Inference using TTS API continue_config_path = os.path.join(continue_path, "config.json") continue_restore_path, _ = get_last_checkpoint(continue_path) out_wav_path = os.path.join(get_tests_output_path(), "output.wav") speaker_id = "ljspeech-1" continue_speakers_path = config.d_vector_file # Check integrity of the config with open(continue_config_path, "r", encoding="utf-8") as f: config_loaded = json.load(f) assert config_loaded["characters"] is not None assert config_loaded["output_path"] in continue_path assert config_loaded["test_delay_epochs"] == 0 # Load the model and run inference inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}" run_cli(inference_command) # restore the model and continue training for one more epoch command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} " run_cli(command_train) shutil.rmtree(continue_path)
0
coqui_public_repos/TTS/TTS
coqui_public_repos/TTS/TTS/bin/train_vocoder.py
import os from dataclasses import dataclass, field from trainer import Trainer, TrainerArgs from TTS.config import load_config, register_config from TTS.utils.audio import AudioProcessor from TTS.vocoder.datasets.preprocess import load_wav_data, load_wav_feat_data from TTS.vocoder.models import setup_model @dataclass class TrainVocoderArgs(TrainerArgs): config_path: str = field(default=None, metadata={"help": "Path to the config file."}) def main(): """Run `tts` model training directly by a `config.json` file.""" # init trainer args train_args = TrainVocoderArgs() parser = train_args.init_argparse(arg_prefix="") # override trainer args from comman-line args args, config_overrides = parser.parse_known_args() train_args.parse_args(args) # load config.json and register if args.config_path or args.continue_path: if args.config_path: # init from a file config = load_config(args.config_path) if len(config_overrides) > 0: config.parse_known_args(config_overrides, relaxed_parser=True) elif args.continue_path: # continue from a prev experiment config = load_config(os.path.join(args.continue_path, "config.json")) if len(config_overrides) > 0: config.parse_known_args(config_overrides, relaxed_parser=True) else: # init from console args from TTS.config.shared_configs import BaseTrainingConfig # pylint: disable=import-outside-toplevel config_base = BaseTrainingConfig() config_base.parse_known_args(config_overrides) config = register_config(config_base.model)() # load training samples if "feature_path" in config and config.feature_path: # load pre-computed features print(f" > Loading features from: {config.feature_path}") eval_samples, train_samples = load_wav_feat_data(config.data_path, config.feature_path, config.eval_split_size) else: # load data raw wav files eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) # setup audio processor ap = AudioProcessor(**config.audio) # init the model from config model = setup_model(config) # init the trainer and 🚀 trainer = Trainer( train_args, config, config.output_path, model=model, train_samples=train_samples, eval_samples=eval_samples, training_assets={"audio_processor": ap}, parse_command_line_args=False, ) trainer.fit() if __name__ == "__main__": main()
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions/far/print-strings.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Outputs as strings the string FSTs in a finite-state archive. #ifndef FST_EXTENSIONS_FAR_PRINT_STRINGS_H_ #define FST_EXTENSIONS_FAR_PRINT_STRINGS_H_ #include <iomanip> #include <string> #include <vector> #include <fst/flags.h> #include <fst/extensions/far/far.h> #include <fstream> #include <fst/shortest-distance.h> #include <fst/string.h> DECLARE_string(far_field_separator); namespace fst { template <class Arc> void FarPrintStrings(const std::vector<string> &ifilenames, FarEntryType entry_type, FarTokenType far_token_type, const string &begin_key, const string &end_key, bool print_key, bool print_weight, const string &symbols_fname, bool initial_symbols, int32 generate_filenames, const string &filename_prefix, const string &filename_suffix) { StringTokenType token_type; if (far_token_type == FTT_SYMBOL) { token_type = StringTokenType::SYMBOL; } else if (far_token_type == FTT_BYTE) { token_type = StringTokenType::BYTE; } else if (far_token_type == FTT_UTF8) { token_type = StringTokenType::UTF8; } else { FSTERROR() << "FarPrintStrings: Unknown token type"; return; } std::unique_ptr<const SymbolTable> syms; if (!symbols_fname.empty()) { // TODO(kbg): Allow negative flag? const SymbolTableTextOptions opts(true); syms.reset(SymbolTable::ReadText(symbols_fname, opts)); if (!syms) { LOG(ERROR) << "FarPrintStrings: Error reading symbol table " << symbols_fname; return; } } std::unique_ptr<FarReader<Arc>> far_reader(FarReader<Arc>::Open(ifilenames)); if (!far_reader) return; if (!begin_key.empty()) far_reader->Find(begin_key); string okey; int nrep = 0; for (int i = 1; !far_reader->Done(); far_reader->Next(), ++i) { const auto &key = far_reader->GetKey(); if (!end_key.empty() && end_key < key) break; if (okey == key) { ++nrep; } else { nrep = 0; } okey = key; const auto *fst = far_reader->GetFst(); if (i == 1 && initial_symbols && !syms && fst->InputSymbols()) syms.reset(fst->InputSymbols()->Copy()); string str; VLOG(2) << "Handling key: " << key; StringPrinter<Arc> string_printer(token_type, syms ? syms.get() : fst->InputSymbols()); string_printer(*fst, &str); if (entry_type == FET_LINE) { if (print_key) std::cout << key << FLAGS_far_field_separator[0]; std::cout << str; if (print_weight) std::cout << FLAGS_far_field_separator[0] << ShortestDistance(*fst); std::cout << std::endl; } else if (entry_type == FET_FILE) { std::stringstream sstrm; if (generate_filenames) { sstrm.fill('0'); sstrm << std::right << std::setw(generate_filenames) << i; } else { sstrm << key; if (nrep > 0) sstrm << "." << nrep; } string filename; filename = filename_prefix + sstrm.str() + filename_suffix; std::ofstream ostrm(filename); if (!ostrm) { LOG(ERROR) << "FarPrintStrings: Can't open file: " << filename; return; } ostrm << str; if (token_type == StringTokenType::SYMBOL) ostrm << "\n"; } } } } // namespace fst #endif // FST_EXTENSIONS_FAR_PRINT_STRINGS_H_
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text/decoder/Decoder.h
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include "flashlight/lib/text/decoder/Utils.h" namespace fl { namespace lib { namespace text { enum class CriterionType { ASG = 0, CTC = 1, S2S = 2 }; /** * Decoder supports two typical use cases: * Offline manner: * decoder.decode(someData) [returns all hypotheses (transcriptions)] * * Online manner: * decoder.decodeBegin() [called only at the beginning of the stream] * while (stream) * decoder.decodeStep(someData) [one or more calls] * decoder.getBestHypothesis() [returns the best hypothesis (transcription)] * decoder.prune() [prunes the hypothesis space] * decoder.decodeEnd() [called only at the end of the stream] * * Note: the function decoder.prune() deletes hypotheses up to the time when it is called * to support online decoding. It will also add an offset to the scores in the beam * to avoid underflow/overflow. * */ class Decoder { public: Decoder() = default; virtual ~Decoder() = default; /* Initialize decoder before starting to consume emissions */ virtual void decodeBegin() {} /* Consume emissions in T x N chunks and increase the hypothesis space */ virtual void decodeStep(const float* emissions, int T, int N) = 0; /* Finish up decoding after consuming all emissions */ virtual void decodeEnd() {} /* Offline decode function, which consumes all emissions at once */ virtual std::vector<DecodeResult> decode(const float* emissions, int T, int N) { decodeBegin(); decodeStep(emissions, T, N); decodeEnd(); return getAllFinalHypothesis(); } /* Prune the hypothesis space */ virtual void prune(int lookBack = 0) = 0; /* Get the number of decoded frames in the buffer */ virtual int nDecodedFramesInBuffer() const = 0; /* * Get the best completed hypothesis which is `lookBack` frames ahead of the last * one in the buffer. For lexicon-required LMs, a completed hypothesis means no * partial word appears at the end. */ virtual DecodeResult getBestHypothesis(int lookBack = 0) const = 0; /* Get all the final hypotheses */ virtual std::vector<DecodeResult> getAllFinalHypothesis() const = 0; }; } // namespace text } // namespace lib } // namespace fl
0
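The Decoder class comment above spells out the streaming protocol: decodeBegin once, decodeStep per chunk, getBestHypothesis and prune while streaming, decodeEnd at the end. Purely as an illustration of that call sequence (not flashlight code), here is a toy Python driver; the GreedyDecoder stand-in is invented for the example and simply tracks the per-frame argmax.

# Toy driver following the online-decoding protocol described above.
class GreedyDecoder:
    def __init__(self):
        self.best = []
    def decode_begin(self):
        self.best = []
    def decode_step(self, emissions):          # emissions: T x N scores
        for frame in emissions:
            self.best.append(max(range(len(frame)), key=frame.__getitem__))
    def decode_end(self):
        pass
    def get_best_hypothesis(self):
        return self.best

dec = GreedyDecoder()
dec.decode_begin()
for chunk in ([[0.1, 0.9], [0.8, 0.2]], [[0.3, 0.7]]):   # stream of T x N chunks
    dec.decode_step(chunk)
dec.decode_end()
print(dec.get_best_hypothesis())   # [1, 0, 1]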
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/mpdt/mpdtreverse.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Reverses an MPDT. #include <cstring> #include <memory> #include <string> #include <vector> #include <fst/flags.h> #include <fst/log.h> #include <fst/extensions/mpdt/mpdtscript.h> #include <fst/extensions/mpdt/read_write_utils.h> #include <fst/util.h> DEFINE_string(mpdt_parentheses, "", "MPDT parenthesis label pairs with assignments."); DEFINE_string(mpdt_new_parentheses, "", "Output for reassigned parentheses and stacks"); int main(int argc, char **argv) { namespace s = fst::script; using fst::ReadLabelTriples; using fst::WriteLabelTriples; using fst::script::FstClass; using fst::script::VectorFstClass; string usage = "Reverse an MPDT.\n\n Usage: "; usage += argv[0]; usage += " in.pdt [out.fst]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 3) { ShowUsage(); return 1; } const string in_name = (argc > 1 && (strcmp(argv[1], "-") != 0)) ? argv[1] : ""; const string out_name = argc > 2 ? argv[2] : ""; std::unique_ptr<FstClass> ifst(FstClass::Read(in_name)); if (!ifst) return 1; if (FLAGS_mpdt_parentheses.empty()) { LOG(ERROR) << argv[0] << ": No MPDT parenthesis label pairs provided"; return 1; } if (FLAGS_mpdt_new_parentheses.empty()) { LOG(ERROR) << argv[0] << ": No MPDT output parenthesis label file provided"; return 1; } std::vector<s::LabelPair> parens; std::vector<int64> assignments; if (!ReadLabelTriples(FLAGS_mpdt_parentheses, &parens, &assignments, false)) return 1; VectorFstClass ofst(ifst->ArcType()); s::MPdtReverse(*ifst, parens, &assignments, &ofst); ofst.Write(out_name); if (!WriteLabelTriples(FLAGS_mpdt_new_parentheses, parens, assignments)) return 1; return 0; }
0
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/configs/align_tts_config.py
from dataclasses import dataclass, field from typing import List from TTS.tts.configs.shared_configs import BaseTTSConfig from TTS.tts.models.align_tts import AlignTTSArgs @dataclass class AlignTTSConfig(BaseTTSConfig): """Defines parameters for the AlignTTS model. Example: >>> from TTS.tts.configs.align_tts_config import AlignTTSConfig >>> config = AlignTTSConfig() Args: model(str): Model name used for selecting the right model at initialization. Defaults to `align_tts`. positional_encoding (bool): enable / disable positional encoding applied to the encoder output. Defaults to True. hidden_channels (int): Base number of hidden channels. Defines all the layers except ones defined by the specific encoder or decoder parameters. Defaults to 256. hidden_channels_dp (int): Number of hidden channels of the duration predictor's layers. Defaults to 256. encoder_type (str): Type of the encoder used by the model. Look at `TTS.tts.layers.feed_forward.encoder` for more details. Defaults to `fftransformer`. encoder_params (dict): Parameters used to define the encoder network. Look at `TTS.tts.layers.feed_forward.encoder` for more details. Defaults to `{"hidden_channels_ffn": 1024, "num_heads": 2, "num_layers": 6, "dropout_p": 0.1}`. decoder_type (str): Type of the decoder used by the model. Look at `TTS.tts.layers.feed_forward.decoder` for more details. Defaults to `fftransformer`. decoder_params (dict): Parameters used to define the decoder network. Look at `TTS.tts.layers.feed_forward.decoder` for more details. Defaults to `{"hidden_channels_ffn": 1024, "num_heads": 2, "num_layers": 6, "dropout_p": 0.1}`. phase_start_steps (List[int]): A list of number of steps required to start the next training phase. AlignTTS has 4 different training phases. Thus you need to define 4 different values to enable phase-based training. If None, it trains the whole model together. Defaults to None. ssim_alpha (float): Weight for the SSIM loss. If set <= 0, disables the SSIM loss. Defaults to 1.0. duration_loss_alpha (float): Weight for the duration predictor's loss. Defaults to 1.0. mdn_alpha (float): Weight for the MDN loss. Defaults to 1.0. spec_loss_alpha (float): Weight for the MSE spectrogram loss. If set <= 0, disables this loss. Defaults to 1.0. use_speaker_embedding (bool): enable / disable using speaker embeddings for multi-speaker models. If set to True, the model is in the multi-speaker mode. Defaults to False. use_d_vector_file (bool): enable / disable using external speaker embeddings in place of the learned embeddings. Defaults to False. d_vector_file (str): Path to the file including pre-computed speaker embeddings. Defaults to None. noam_schedule (bool): enable / disable the use of the Noam LR scheduler. Defaults to False. warmup_steps (int): Number of warm-up steps for the Noam scheduler. Defaults to 4000. lr (float): Initial learning rate. Defaults to `1e-3`. wd (float): Weight decay coefficient. Defaults to `1e-7`. min_seq_len (int): Minimum input sequence length to be used at training. max_seq_len (int): Maximum input sequence length to be used at training.
Larger values result in more VRAM usage.""" model: str = "align_tts" # model specific params model_args: AlignTTSArgs = field(default_factory=AlignTTSArgs) phase_start_steps: List[int] = None ssim_alpha: float = 1.0 spec_loss_alpha: float = 1.0 dur_loss_alpha: float = 1.0 mdn_alpha: float = 1.0 # multi-speaker settings use_speaker_embedding: bool = False use_d_vector_file: bool = False d_vector_file: str = False # optimizer parameters optimizer: str = "Adam" optimizer_params: dict = field(default_factory=lambda: {"betas": [0.9, 0.998], "weight_decay": 1e-6}) lr_scheduler: str = None lr_scheduler_params: dict = None lr: float = 1e-4 grad_clip: float = 5.0 # overrides min_seq_len: int = 13 max_seq_len: int = 200 r: int = 1 # testing test_sentences: List[str] = field( default_factory=lambda: [ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", "Be a voice, not an echo.", "I'm sorry Dave. I'm afraid I can't do that.", "This cake is great. It's so delicious and moist.", "Prior to November 22, 1963.", ] )
0
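The AlignTTSConfig docstring above only shows the default constructor. As a slightly fuller sketch, the snippet below overrides a few of the fields defined in the dataclass; the field names come from the code above, the values are illustrative only, and it assumes the TTS package is installed.

# Illustrative override of AlignTTSConfig fields; values are placeholders.
from TTS.tts.configs.align_tts_config import AlignTTSConfig

config = AlignTTSConfig(
    phase_start_steps=[0, 20000, 40000, 60000],  # 4 values, one per training phase
    ssim_alpha=1.0,
    dur_loss_alpha=1.0,
    mdn_alpha=1.0,
    spec_loss_alpha=1.0,
    lr=1e-4,
)
print(config.model)  # "align_tts"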
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/tacotron/attentions.py
import torch from scipy.stats import betabinom from torch import nn from torch.nn import functional as F from TTS.tts.layers.tacotron.common_layers import Linear class LocationLayer(nn.Module): """Layers for Location Sensitive Attention Args: attention_dim (int): number of channels in the input tensor. attention_n_filters (int, optional): number of filters in convolution. Defaults to 32. attention_kernel_size (int, optional): kernel size of convolution filter. Defaults to 31. """ def __init__(self, attention_dim, attention_n_filters=32, attention_kernel_size=31): super().__init__() self.location_conv1d = nn.Conv1d( in_channels=2, out_channels=attention_n_filters, kernel_size=attention_kernel_size, stride=1, padding=(attention_kernel_size - 1) // 2, bias=False, ) self.location_dense = Linear(attention_n_filters, attention_dim, bias=False, init_gain="tanh") def forward(self, attention_cat): """ Shapes: attention_cat: [B, 2, C] """ processed_attention = self.location_conv1d(attention_cat) processed_attention = self.location_dense(processed_attention.transpose(1, 2)) return processed_attention class GravesAttention(nn.Module): """Graves Attention as is ref1 with updates from ref2. ref1: https://arxiv.org/abs/1910.10288 ref2: https://arxiv.org/pdf/1906.01083.pdf Args: query_dim (int): number of channels in query tensor. K (int): number of Gaussian heads to be used for computing attention. """ COEF = 0.3989422917366028 # numpy.sqrt(1/(2*numpy.pi)) def __init__(self, query_dim, K): super().__init__() self._mask_value = 1e-8 self.K = K # self.attention_alignment = 0.05 self.eps = 1e-5 self.J = None self.N_a = nn.Sequential( nn.Linear(query_dim, query_dim, bias=True), nn.ReLU(), nn.Linear(query_dim, 3 * K, bias=True) ) self.attention_weights = None self.mu_prev = None self.init_layers() def init_layers(self): torch.nn.init.constant_(self.N_a[2].bias[(2 * self.K) : (3 * self.K)], 1.0) # bias mean torch.nn.init.constant_(self.N_a[2].bias[self.K : (2 * self.K)], 10) # bias std def init_states(self, inputs): if self.J is None or inputs.shape[1] + 1 > self.J.shape[-1]: self.J = torch.arange(0, inputs.shape[1] + 2.0).to(inputs.device) + 0.5 self.attention_weights = torch.zeros(inputs.shape[0], inputs.shape[1]).to(inputs.device) self.mu_prev = torch.zeros(inputs.shape[0], self.K).to(inputs.device) # pylint: disable=R0201 # pylint: disable=unused-argument def preprocess_inputs(self, inputs): return None def forward(self, query, inputs, processed_inputs, mask): """ Shapes: query: [B, C_attention_rnn] inputs: [B, T_in, C_encoder] processed_inputs: place_holder mask: [B, T_in] """ gbk_t = self.N_a(query) gbk_t = gbk_t.view(gbk_t.size(0), -1, self.K) # attention model parameters # each B x K g_t = gbk_t[:, 0, :] b_t = gbk_t[:, 1, :] k_t = gbk_t[:, 2, :] # dropout to decorrelate attention heads g_t = torch.nn.functional.dropout(g_t, p=0.5, training=self.training) # attention GMM parameters sig_t = torch.nn.functional.softplus(b_t) + self.eps mu_t = self.mu_prev + torch.nn.functional.softplus(k_t) g_t = torch.softmax(g_t, dim=-1) + self.eps j = self.J[: inputs.size(1) + 1] # attention weights phi_t = g_t.unsqueeze(-1) * (1 / (1 + torch.sigmoid((mu_t.unsqueeze(-1) - j) / sig_t.unsqueeze(-1)))) # discritize attention weights alpha_t = torch.sum(phi_t, 1) alpha_t = alpha_t[:, 1:] - alpha_t[:, :-1] alpha_t[alpha_t == 0] = 1e-8 # apply masking if mask is not None: alpha_t.data.masked_fill_(~mask, self._mask_value) context = torch.bmm(alpha_t.unsqueeze(1), inputs).squeeze(1) self.attention_weights = alpha_t 
self.mu_prev = mu_t return context class OriginalAttention(nn.Module): """Bahdanau Attention with various optional modifications. - Location sensitive attention: https://arxiv.org/abs/1712.05884 - Forward Attention: https://arxiv.org/abs/1807.06736 + state masking at inference - Using sigmoid instead of softmax normalization - Attention windowing at inference time Note: Location Sensitive Attention extends the additive attention mechanism to use cumulative attention weights from previous decoder time steps with the current time step features. Forward attention computes the most probable monotonic alignment. The modified attention probabilities at each timestep are computed recursively by the forward algorithm. Transition agent in the forward attention explicitly gates the attention mechanism whether to move forward or stay at each decoder timestep. Attention windowing is an inductive prior that prevents the model from attending to previous and future timesteps beyond a certain window. Args: query_dim (int): number of channels in the query tensor. embedding_dim (int): number of channels in the value tensor. In general, the value tensor is the output of the encoder layer. attention_dim (int): number of channels of the inner attention layers. location_attention (bool): enable/disable location sensitive attention. attention_location_n_filters (int): number of location attention filters. attention_location_kernel_size (int): filter size of location attention convolution layer. windowing (int): window size for attention windowing. If it is 5, for computing the attention, it only considers the time steps [(t-5), ..., (t+5)] of the input. norm (str): normalization method applied to the attention weights. 'softmax' or 'sigmoid' forward_attn (bool): enable/disable forward attention. trans_agent (bool): enable/disable transition agent in the forward attention. forward_attn_mask (int): enable/disable an explicit masking in forward attention. It is especially useful to set at inference time. 
""" # Pylint gets confused by PyTorch conventions here # pylint: disable=attribute-defined-outside-init def __init__( self, query_dim, embedding_dim, attention_dim, location_attention, attention_location_n_filters, attention_location_kernel_size, windowing, norm, forward_attn, trans_agent, forward_attn_mask, ): super().__init__() self.query_layer = Linear(query_dim, attention_dim, bias=False, init_gain="tanh") self.inputs_layer = Linear(embedding_dim, attention_dim, bias=False, init_gain="tanh") self.v = Linear(attention_dim, 1, bias=True) if trans_agent: self.ta = nn.Linear(query_dim + embedding_dim, 1, bias=True) if location_attention: self.location_layer = LocationLayer( attention_dim, attention_location_n_filters, attention_location_kernel_size, ) self._mask_value = -float("inf") self.windowing = windowing self.win_idx = None self.norm = norm self.forward_attn = forward_attn self.trans_agent = trans_agent self.forward_attn_mask = forward_attn_mask self.location_attention = location_attention def init_win_idx(self): self.win_idx = -1 self.win_back = 2 self.win_front = 6 def init_forward_attn(self, inputs): B = inputs.shape[0] T = inputs.shape[1] self.alpha = torch.cat([torch.ones([B, 1]), torch.zeros([B, T])[:, :-1] + 1e-7], dim=1).to(inputs.device) self.u = (0.5 * torch.ones([B, 1])).to(inputs.device) def init_location_attention(self, inputs): B = inputs.size(0) T = inputs.size(1) self.attention_weights_cum = torch.zeros([B, T], device=inputs.device) def init_states(self, inputs): B = inputs.size(0) T = inputs.size(1) self.attention_weights = torch.zeros([B, T], device=inputs.device) if self.location_attention: self.init_location_attention(inputs) if self.forward_attn: self.init_forward_attn(inputs) if self.windowing: self.init_win_idx() def preprocess_inputs(self, inputs): return self.inputs_layer(inputs) def update_location_attention(self, alignments): self.attention_weights_cum += alignments def get_location_attention(self, query, processed_inputs): attention_cat = torch.cat((self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1) processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer(attention_cat) energies = self.v(torch.tanh(processed_query + processed_attention_weights + processed_inputs)) energies = energies.squeeze(-1) return energies, processed_query def get_attention(self, query, processed_inputs): processed_query = self.query_layer(query.unsqueeze(1)) energies = self.v(torch.tanh(processed_query + processed_inputs)) energies = energies.squeeze(-1) return energies, processed_query def apply_windowing(self, attention, inputs): back_win = self.win_idx - self.win_back front_win = self.win_idx + self.win_front if back_win > 0: attention[:, :back_win] = -float("inf") if front_win < inputs.shape[1]: attention[:, front_win:] = -float("inf") # this is a trick to solve a special problem. # but it does not hurt. 
if self.win_idx == -1: attention[:, 0] = attention.max() # Update the window self.win_idx = torch.argmax(attention, 1).long()[0].item() return attention def apply_forward_attention(self, alignment): # forward attention fwd_shifted_alpha = F.pad(self.alpha[:, :-1].clone().to(alignment.device), (1, 0, 0, 0)) # compute transition potentials alpha = ((1 - self.u) * self.alpha + self.u * fwd_shifted_alpha + 1e-8) * alignment # force incremental alignment if not self.training and self.forward_attn_mask: _, n = fwd_shifted_alpha.max(1) val, _ = alpha.max(1) for b in range(alignment.shape[0]): alpha[b, n[b] + 3 :] = 0 alpha[b, : (n[b] - 1)] = 0 # ignore all previous states to prevent repetition. alpha[b, (n[b] - 2)] = 0.01 * val[b] # smoothing factor for the prev step # renormalize attention weights alpha = alpha / alpha.sum(dim=1, keepdim=True) return alpha def forward(self, query, inputs, processed_inputs, mask): """ shapes: query: [B, C_attn_rnn] inputs: [B, T_en, D_en] processed_inputs: [B, T_en, D_attn] mask: [B, T_en] """ if self.location_attention: attention, _ = self.get_location_attention(query, processed_inputs) else: attention, _ = self.get_attention(query, processed_inputs) # apply masking if mask is not None: attention.data.masked_fill_(~mask, self._mask_value) # apply windowing - only in eval mode if not self.training and self.windowing: attention = self.apply_windowing(attention, inputs) # normalize attention values if self.norm == "softmax": alignment = torch.softmax(attention, dim=-1) elif self.norm == "sigmoid": alignment = torch.sigmoid(attention) / torch.sigmoid(attention).sum(dim=1, keepdim=True) else: raise ValueError("Unknown value for attention norm type") if self.location_attention: self.update_location_attention(alignment) # apply forward attention if enabled if self.forward_attn: alignment = self.apply_forward_attention(alignment) self.alpha = alignment context = torch.bmm(alignment.unsqueeze(1), inputs) context = context.squeeze(1) self.attention_weights = alignment # compute transition agent if self.forward_attn and self.trans_agent: ta_input = torch.cat([context, query.squeeze(1)], dim=-1) self.u = torch.sigmoid(self.ta(ta_input)) return context class MonotonicDynamicConvolutionAttention(nn.Module): """Dynamic convolution attention from https://arxiv.org/pdf/1910.10288.pdf query -> linear -> tanh -> linear ->| | mask values v | | atten_w(t-1) -|-> conv1d_dynamic -> linear -|-> tanh -> + -> softmax -> * -> * -> context |-> conv1d_static -> linear -| | |-> conv1d_prior -> log ----------------| query: attention rnn output. Note: Dynamic convolution attention is an alternation of the location senstive attention with dynamically computed convolution filters from the previous attention scores and a set of constraints to keep the attention alignment diagonal. DCA is sensitive to mixed precision training and might cause instable training. Args: query_dim (int): number of channels in the query tensor. embedding_dim (int): number of channels in the value tensor. static_filter_dim (int): number of channels in the convolution layer computing the static filters. static_kernel_size (int): kernel size for the convolution layer computing the static filters. dynamic_filter_dim (int): number of channels in the convolution layer computing the dynamic filters. dynamic_kernel_size (int): kernel size for the convolution layer computing the dynamic filters. prior_filter_len (int, optional): [description]. Defaults to 11 from the paper. alpha (float, optional): [description]. 
Defaults to 0.1 from the paper. beta (float, optional): [description]. Defaults to 0.9 from the paper. """ def __init__( self, query_dim, embedding_dim, # pylint: disable=unused-argument attention_dim, static_filter_dim, static_kernel_size, dynamic_filter_dim, dynamic_kernel_size, prior_filter_len=11, alpha=0.1, beta=0.9, ): super().__init__() self._mask_value = 1e-8 self.dynamic_filter_dim = dynamic_filter_dim self.dynamic_kernel_size = dynamic_kernel_size self.prior_filter_len = prior_filter_len self.attention_weights = None # setup key and query layers self.query_layer = nn.Linear(query_dim, attention_dim) self.key_layer = nn.Linear(attention_dim, dynamic_filter_dim * dynamic_kernel_size, bias=False) self.static_filter_conv = nn.Conv1d( 1, static_filter_dim, static_kernel_size, padding=(static_kernel_size - 1) // 2, bias=False, ) self.static_filter_layer = nn.Linear(static_filter_dim, attention_dim, bias=False) self.dynamic_filter_layer = nn.Linear(dynamic_filter_dim, attention_dim) self.v = nn.Linear(attention_dim, 1, bias=False) prior = betabinom.pmf(range(prior_filter_len), prior_filter_len - 1, alpha, beta) self.register_buffer("prior", torch.FloatTensor(prior).flip(0)) # pylint: disable=unused-argument def forward(self, query, inputs, processed_inputs, mask): """ query: [B, C_attn_rnn] inputs: [B, T_en, D_en] processed_inputs: place holder. mask: [B, T_en] """ # compute prior filters prior_filter = F.conv1d( F.pad(self.attention_weights.unsqueeze(1), (self.prior_filter_len - 1, 0)), self.prior.view(1, 1, -1) ) prior_filter = torch.log(prior_filter.clamp_min_(1e-6)).squeeze(1) G = self.key_layer(torch.tanh(self.query_layer(query))) # compute dynamic filters dynamic_filter = F.conv1d( self.attention_weights.unsqueeze(0), G.view(-1, 1, self.dynamic_kernel_size), padding=(self.dynamic_kernel_size - 1) // 2, groups=query.size(0), ) dynamic_filter = dynamic_filter.view(query.size(0), self.dynamic_filter_dim, -1).transpose(1, 2) # compute static filters static_filter = self.static_filter_conv(self.attention_weights.unsqueeze(1)).transpose(1, 2) alignment = ( self.v( torch.tanh(self.static_filter_layer(static_filter) + self.dynamic_filter_layer(dynamic_filter)) ).squeeze(-1) + prior_filter ) # compute attention weights attention_weights = F.softmax(alignment, dim=-1) # apply masking if mask is not None: attention_weights.data.masked_fill_(~mask, self._mask_value) self.attention_weights = attention_weights # compute context context = torch.bmm(attention_weights.unsqueeze(1), inputs).squeeze(1) return context def preprocess_inputs(self, inputs): # pylint: disable=no-self-use return None def init_states(self, inputs): B = inputs.size(0) T = inputs.size(1) self.attention_weights = torch.zeros([B, T], device=inputs.device) self.attention_weights[:, 0] = 1.0 def init_attn( attn_type, query_dim, embedding_dim, attention_dim, location_attention, attention_location_n_filters, attention_location_kernel_size, windowing, norm, forward_attn, trans_agent, forward_attn_mask, attn_K, ): if attn_type == "original": return OriginalAttention( query_dim, embedding_dim, attention_dim, location_attention, attention_location_n_filters, attention_location_kernel_size, windowing, norm, forward_attn, trans_agent, forward_attn_mask, ) if attn_type == "graves": return GravesAttention(query_dim, attn_K) if attn_type == "dynamic_convolution": return MonotonicDynamicConvolutionAttention( query_dim, embedding_dim, attention_dim, static_filter_dim=8, static_kernel_size=21, dynamic_filter_dim=8, dynamic_kernel_size=21, 
prior_filter_len=11, alpha=0.1, beta=0.9, ) raise RuntimeError(f" [!] Given Attention Type '{attn_type}' does not exist.")
0
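The three attention modules above share the same call protocol: call `init_states(inputs)` once per utterance, run the encoder outputs through `preprocess_inputs`, then call the module with the query, the raw inputs, the preprocessed inputs, and a padding mask. The sketch below exercises `init_attn` with the "original" variant; the batch size, sequence lengths, and channel sizes are illustrative assumptions, not values from any particular Tacotron recipe.

```python
# Minimal usage sketch for the attention factory defined above; all tensor
# sizes here are illustrative assumptions, not values from a real recipe.
import torch

from TTS.tts.layers.tacotron.attentions import init_attn

B, T_en, query_dim, embedding_dim, attention_dim = 2, 50, 256, 512, 128

attn = init_attn(
    attn_type="original",            # "original", "graves" or "dynamic_convolution"
    query_dim=query_dim,
    embedding_dim=embedding_dim,
    attention_dim=attention_dim,
    location_attention=True,
    attention_location_n_filters=32,
    attention_location_kernel_size=31,
    windowing=False,
    norm="softmax",
    forward_attn=False,
    trans_agent=False,
    forward_attn_mask=False,
    attn_K=5,                        # only read by the Graves attention
)

inputs = torch.randn(B, T_en, embedding_dim)           # encoder outputs
query = torch.randn(B, query_dim)                      # attention-RNN state
mask = torch.ones(B, T_en, dtype=torch.bool)           # no padded positions in this toy batch

attn.init_states(inputs)                               # reset per-utterance attention state
processed_inputs = attn.preprocess_inputs(inputs)      # projected inputs (None for Graves/DCA)
context = attn(query, inputs, processed_inputs, mask)  # [B, embedding_dim]
print(context.shape, attn.attention_weights.shape)     # torch.Size([2, 512]) torch.Size([2, 50])
```

Swapping `attn_type` to "graves" or "dynamic_convolution" keeps the same protocol; only the constructor arguments the factory forwards differ.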
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-electronjs_v5.0-win-amd64-opt.yml
build: template_file: test-win-opt-base.tyml dependencies: - "win-amd64-cpu-opt" - "test-training_16k-linux-amd64-py36m-opt" test_model_task: "test-training_16k-linux-amd64-py36m-opt" system_setup: > ${system.sox_win} && ${nodejs.win.prep_12} args: tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 5.0.6 16k" metadata: name: "DeepSpeech Windows AMD64 CPU ElectronJS v5.0 tests" description: "Testing DeepSpeech for Windows/AMD64 on ElectronJS v5.0, CPU only, optimized version"
0
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/layers/__init__.py
from TTS.tts.layers.losses import *
0
coqui_public_repos/stt-model-manager
coqui_public_repos/stt-model-manager/coqui_stt_model_manager/__main__.py
"""Entry point. Run the server and open a browser pointing to it.""" import os import time import webbrowser from threading import Thread import requests from .server import build_app, get_server_hostport, start_app def main(): app = build_app() host, port = get_server_hostport() addr = f"http://{host}:{port}" def check_server_started_loop(): while True: try: req = requests.get(addr, headers={"User-Agent": "page-opener-thread"}) if req.status_code == 200: break except requests.exceptions.ConnectionError: pass time.sleep(0.1) print(f"Started server listening on {addr} ...") webbrowser.open(addr) if "STT_MODEL_MANAGER_ALREADY_LOADED" not in os.environ: thread = Thread(target=check_server_started_loop) thread.start() os.environ["STT_MODEL_MANAGER_ALREADY_LOADED"] = "1" start_app(app) if __name__ == "__main__": main()
0
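`main()` above only spawns the page-opener thread (and hence the `webbrowser.open` call) when `STT_MODEL_MANAGER_ALREADY_LOADED` is unset, so a headless run can be forced by pre-setting that variable. A small sketch, assuming the package is importable as shown:

```python
# Hedged sketch: run the model manager without opening a browser tab by
# pre-setting the guard variable that main() checks before starting the
# page-opener thread.
import os

from coqui_stt_model_manager.__main__ import main

os.environ["STT_MODEL_MANAGER_ALREADY_LOADED"] = "1"
main()  # builds the app and blocks in start_app(); no webbrowser.open() happens
```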
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions/far/isomorphic.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_EXTENSIONS_FAR_ISOMORPHIC_H_ #define FST_EXTENSIONS_FAR_ISOMORPHIC_H_ #include <memory> #include <string> #include <fst/extensions/far/far.h> #include <fst/isomorphic.h> namespace fst { template <class Arc> bool FarIsomorphic(const string &filename1, const string &filename2, float delta = kDelta, const string &begin_key = string(), const string &end_key = string()) { std::unique_ptr<FarReader<Arc>> reader1(FarReader<Arc>::Open(filename1)); if (!reader1) { LOG(ERROR) << "FarIsomorphic: Cannot open FAR file " << filename1; return false; } std::unique_ptr<FarReader<Arc>> reader2(FarReader<Arc>::Open(filename2)); if (!reader2) { LOG(ERROR) << "FarIsomorphic: Cannot open FAR file " << filename2; return false; } if (!begin_key.empty()) { bool find_begin1 = reader1->Find(begin_key); bool find_begin2 = reader2->Find(begin_key); if (!find_begin1 || !find_begin2) { bool ret = !find_begin1 && !find_begin2; if (!ret) { VLOG(1) << "FarIsomorphic: Key " << begin_key << " missing from " << (find_begin1 ? "second" : "first") << " archive."; } return ret; } } for (; !reader1->Done() && !reader2->Done(); reader1->Next(), reader2->Next()) { const auto &key1 = reader1->GetKey(); const auto &key2 = reader2->GetKey(); if (!end_key.empty() && end_key < key1 && end_key < key2) return true; if (key1 != key2) { LOG(ERROR) << "FarIsomorphic: Mismatched keys " << key1 << " and " << key2; return false; } if (!Isomorphic(*(reader1->GetFst()), *(reader2->GetFst()), delta)) { LOG(ERROR) << "FarIsomorphic: FSTs for key " << key1 << " are not isomorphic"; return false; } } if (!reader1->Done() || !reader2->Done()) { LOG(ERROR) << "FarIsomorphic: Key " << (reader1->Done() ? reader2->GetKey() : reader1->GetKey()) << " missing form " << (reader2->Done() ? "first" : "second") << " archive"; return false; } return true; } } // namespace fst #endif // FST_EXTENSIONS_FAR_ISOMORPHIC_H_
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/pdt/Makefile.am
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS) if HAVE_BIN bin_PROGRAMS = pdtcompose pdtexpand pdtinfo pdtreplace pdtreverse \ pdtshortestpath LDADD = libfstpdtscript.la \ ../../script/libfstscript.la \ ../../lib/libfst.la -lm $(DL_LIBS) pdtcompose_SOURCES = pdtcompose.cc pdtexpand_SOURCES = pdtexpand.cc pdtinfo_SOURCES = pdtinfo.cc pdtreplace_SOURCES = pdtreplace.cc pdtreverse_SOURCES = pdtreverse.cc pdtshortestpath_SOURCES = pdtshortestpath.cc endif if HAVE_SCRIPT lib_LTLIBRARIES = libfstpdtscript.la libfstpdtscript_la_SOURCES = getters.cc pdtscript.cc libfstpdtscript_la_LDFLAGS = -version-info 10:0:0 libfstpdtscript_la_LIBADD = ../../script/libfstscript.la \ ../../lib/libfst.la -lm $(DL_LIBS) endif
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/rmfinalepsilon.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Function to remove of final states that have epsilon-only input arcs. #ifndef FST_RMFINALEPSILON_H_ #define FST_RMFINALEPSILON_H_ #include <unordered_set> #include <vector> #include <fst/connect.h> #include <fst/mutable-fst.h> namespace fst { // Removes final states that have epsilon-only input arcs. template <class Arc> void RmFinalEpsilon(MutableFst<Arc> *fst) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; // Determines the coaccesibility of states. std::vector<bool> access; std::vector<bool> coaccess; uint64_t props = 0; SccVisitor<Arc> scc_visitor(nullptr, &access, &coaccess, &props); DfsVisit(*fst, &scc_visitor); // Finds potential list of removable final states. These are final states that // have no outgoing transitions or final states that have a non-coaccessible // future. std::unordered_set<StateId> finals; for (StateIterator<Fst<Arc>> siter(*fst); !siter.Done(); siter.Next()) { const auto s = siter.Value(); if (fst->Final(s) != Weight::Zero()) { bool future_coaccess = false; for (ArcIterator<Fst<Arc>> aiter(*fst, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); if (coaccess[arc.nextstate]) { future_coaccess = true; break; } } if (!future_coaccess) finals.insert(s); } } // Moves the final weight. std::vector<Arc> arcs; for (StateIterator<Fst<Arc>> siter(*fst); !siter.Done(); siter.Next()) { const auto s = siter.Value(); auto weight = fst->Final(s); arcs.clear(); for (ArcIterator<Fst<Arc>> aiter(*fst, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); // Next state is in the list of finals. if (finals.find(arc.nextstate) != finals.end()) { // Sums up all epsilon arcs. if (arc.ilabel == 0 && arc.olabel == 0) { weight = Plus(Times(fst->Final(arc.nextstate), arc.weight), weight); } else { arcs.push_back(arc); } } else { arcs.push_back(arc); } } // If some arcs (epsilon arcs) were deleted, delete all arcs and add back // only the non-epsilon arcs. if (arcs.size() < fst->NumArcs(s)) { fst->DeleteArcs(s); fst->SetFinal(s, weight); for (const auto &arc : arcs) fst->AddArc(s, arc); } } Connect(fst); } } // namespace fst #endif // FST_RMFINALEPSILON_H_
0
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/python_websocket_server/README.md
# Python websocket-based server This directory contains a simple service that receives audio data from clients, and serves the results of STT inference over a websocket. The server code in this project is a modified version of [this GitHub project](https://github.com/zelo/deepspeech-rest-api). Because STT transcriptions can typically be considered "long running tasks", using websockets for client-server communication provides several benefits: 1. Avoids all sorts of timeouts at several points in the path - for example at the client, server, load balancer and/or proxy, etc. 2. Avoids the need for the client to poll the server for result, as well as avoids the complexity that is typically induced by a polling-based architecture ## Configuration Server configuration is specified in the [`application.conf`](application.conf) file. ## Usage ### Starting the server Make sure your model and scorer files are present in the same directory as the `application.conf` file. Then execute: ``` python -m stt_server.app ``` ### Sending requests to server The client-server request-response process looks like the following: 1. Client opens websocket _W_ to server 2. Client sends _binary_ audio data via _W_ 3. Server responds with transcribed text via _W_ once transcription process is completed. The server's response is in JSON format. 4. Server closes _W_ The time _t_ taken by the transcription process depends on several factors, such as the duration of the audio, how busy the service is, etc. Under normal circumstances, _t_ is roughly the same as the duration of the provided audio. Because this service uses websockets, it is currently not possible to interact with it using certain HTTP clients which do not support websockets, like `curl`. The following example uses the Python [`websocket-client`](https://pypi.org/project/websocket_client/) package. ```python import websocket ws = websocket.WebSocket() ws.connect("ws://localhost:8080/api/v1/stt") with open("audiofile.wav", mode='rb') as file: # b is important -> binary audio = file.read() ws.send_binary(audio) result = ws.recv() print(result) # Print text transcription received from server ``` Example output: ``` {"text": "experience proves this", "time": 2.4083645999999987} ``` ## Deployment ### Kubernetes The [helm](helm) directory contains an example Helm deployment, that configures an Nginx ingress to expose the STT service. The websocket timeout on the ingress is set to 1 hour. ## Contributing Bug reports and merge requests are welcome.
0
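The README's client example uses the synchronous `websocket-client` package; an equivalent asyncio client built on the `websockets` package looks roughly like the sketch below. The endpoint, file name, and response fields are taken from the README; the choice of `websockets` itself is an assumption, not something the server requires.

```python
# Async variant of the README's client, assuming the `websockets` package
# (pip install websockets); endpoint and response format follow the README.
import asyncio
import json

import websockets


async def transcribe(path: str) -> dict:
    async with websockets.connect("ws://localhost:8080/api/v1/stt") as ws:
        with open(path, "rb") as f:
            await ws.send(f.read())         # bytes payload -> binary websocket frame
        return json.loads(await ws.recv())  # e.g. {"text": "...", "time": 2.41}


print(asyncio.run(transcribe("audiofile.wav")))
```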
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/difference.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/script/difference.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void Difference(const FstClass &ifst1, const FstClass &ifst2, MutableFstClass *ofst, const ComposeOptions &opts) { if (!internal::ArcTypesMatch(ifst1, ifst2, "Difference") || !internal::ArcTypesMatch(*ofst, ifst1, "Difference")) { ofst->SetProperties(kError, kError); return; } DifferenceArgs args(ifst1, ifst2, ofst, opts); Apply<Operation<DifferenceArgs>>("Difference", ifst1.ArcType(), &args); } REGISTER_FST_OPERATION(Difference, StdArc, DifferenceArgs); REGISTER_FST_OPERATION(Difference, LogArc, DifferenceArgs); REGISTER_FST_OPERATION(Difference, Log64Arc, DifferenceArgs); } // namespace script } // namespace fst
0
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/kenlm/LICENSE
Most of the code here is licensed under the LGPL. There are exceptions that have their own licenses, listed below. See comments in those files for more details. util/getopt.* is getopt for Windows util/murmur_hash.cc util/string_piece.hh and util/string_piece.cc util/double-conversion/LICENSE covers util/double-conversion except the build files util/file.cc contains a modified implementation of mkstemp under the LGPL util/integer_to_string.* is BSD For the rest: KenLM is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 2.1 of the License, or (at your option) any later version. KenLM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License 2.1 along with KenLM code. If not, see <http://www.gnu.org/licenses/lgpl-2.1.html>.
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/archives/binary.hpp
/*! \file binary.hpp \brief Binary input and output archives */ /* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CEREAL_ARCHIVES_BINARY_HPP_ #define CEREAL_ARCHIVES_BINARY_HPP_ #include "cereal/cereal.hpp" #include <sstream> namespace cereal { // ###################################################################### //! An output archive designed to save data in a compact binary representation /*! This archive outputs data to a stream in an extremely compact binary representation with as little extra metadata as possible. This archive does nothing to ensure that the endianness of the saved and loaded data is the same. If you need to have portability over architectures with different endianness, use PortableBinaryOutputArchive. When using a binary archive and a file stream, you must use the std::ios::binary format flag to avoid having your data altered inadvertently. \ingroup Archives */ class BinaryOutputArchive : public OutputArchive<BinaryOutputArchive, AllowEmptyClassElision> { public: //! Construct, outputting to the provided stream /*! @param stream The stream to output to. Can be a stringstream, a file stream, or even cout! */ BinaryOutputArchive(std::ostream & stream) : OutputArchive<BinaryOutputArchive, AllowEmptyClassElision>(this), itsStream(stream) { } ~BinaryOutputArchive() CEREAL_NOEXCEPT = default; //! Writes size bytes of data to the output stream void saveBinary( const void * data, std::streamsize size ) { auto const writtenSize = itsStream.rdbuf()->sputn( reinterpret_cast<const char*>( data ), size ); if(writtenSize != size) throw Exception("Failed to write " + std::to_string(size) + " bytes to output stream! Wrote " + std::to_string(writtenSize)); } private: std::ostream & itsStream; }; // ###################################################################### //! An input archive designed to load data saved using BinaryOutputArchive /* This archive does nothing to ensure that the endianness of the saved and loaded data is the same. If you need to have portability over architectures with different endianness, use PortableBinaryOutputArchive. 
When using a binary archive and a file stream, you must use the std::ios::binary format flag to avoid having your data altered inadvertently. \ingroup Archives */ class BinaryInputArchive : public InputArchive<BinaryInputArchive, AllowEmptyClassElision> { public: //! Construct, loading from the provided stream BinaryInputArchive(std::istream & stream) : InputArchive<BinaryInputArchive, AllowEmptyClassElision>(this), itsStream(stream) { } ~BinaryInputArchive() CEREAL_NOEXCEPT = default; //! Reads size bytes of data from the input stream void loadBinary( void * const data, std::streamsize size ) { auto const readSize = itsStream.rdbuf()->sgetn( reinterpret_cast<char*>( data ), size ); if(readSize != size) throw Exception("Failed to read " + std::to_string(size) + " bytes from input stream! Read " + std::to_string(readSize)); } private: std::istream & itsStream; }; // ###################################################################### // Common BinaryArchive serialization functions //! Saving for POD types to binary template<class T> inline typename std::enable_if<std::is_arithmetic<T>::value, void>::type CEREAL_SAVE_FUNCTION_NAME(BinaryOutputArchive & ar, T const & t) { ar.saveBinary(std::addressof(t), sizeof(t)); } //! Loading for POD types from binary template<class T> inline typename std::enable_if<std::is_arithmetic<T>::value, void>::type CEREAL_LOAD_FUNCTION_NAME(BinaryInputArchive & ar, T & t) { ar.loadBinary(std::addressof(t), sizeof(t)); } //! Serializing NVP types to binary template <class Archive, class T> inline CEREAL_ARCHIVE_RESTRICT(BinaryInputArchive, BinaryOutputArchive) CEREAL_SERIALIZE_FUNCTION_NAME( Archive & ar, NameValuePair<T> & t ) { ar( t.value ); } //! Serializing SizeTags to binary template <class Archive, class T> inline CEREAL_ARCHIVE_RESTRICT(BinaryInputArchive, BinaryOutputArchive) CEREAL_SERIALIZE_FUNCTION_NAME( Archive & ar, SizeTag<T> & t ) { ar( t.size ); } //! Saving binary data template <class T> inline void CEREAL_SAVE_FUNCTION_NAME(BinaryOutputArchive & ar, BinaryData<T> const & bd) { ar.saveBinary( bd.data, static_cast<std::streamsize>( bd.size ) ); } //! Loading binary data template <class T> inline void CEREAL_LOAD_FUNCTION_NAME(BinaryInputArchive & ar, BinaryData<T> & bd) { ar.loadBinary(bd.data, static_cast<std::streamsize>( bd.size ) ); } } // namespace cereal // register archives for polymorphic support CEREAL_REGISTER_ARCHIVE(cereal::BinaryOutputArchive) CEREAL_REGISTER_ARCHIVE(cereal::BinaryInputArchive) // tie input and output archives together CEREAL_SETUP_ARCHIVE_TRAITS(cereal::BinaryInputArchive, cereal::BinaryOutputArchive) #endif // CEREAL_ARCHIVES_BINARY_HPP_
0
coqui_public_repos/STT/native_client/kenlm/lm
coqui_public_repos/STT/native_client/kenlm/lm/common/ngram.hh
#ifndef LM_COMMON_NGRAM_H #define LM_COMMON_NGRAM_H #include "../weights.hh" #include "../word_index.hh" #include <cstddef> #include <cassert> #include <stdint.h> #include <cstring> namespace lm { class NGramHeader { public: NGramHeader(void *begin, std::size_t order) : begin_(static_cast<WordIndex*>(begin)), end_(begin_ + order) {} NGramHeader() : begin_(NULL), end_(NULL) {} const uint8_t *Base() const { return reinterpret_cast<const uint8_t*>(begin_); } uint8_t *Base() { return reinterpret_cast<uint8_t*>(begin_); } void ReBase(void *to) { std::size_t difference = end_ - begin_; begin_ = reinterpret_cast<WordIndex*>(to); end_ = begin_ + difference; } // These are for the vocab index. // Lower-case in deference to STL. const WordIndex *begin() const { return begin_; } WordIndex *begin() { return begin_; } const WordIndex *end() const { return end_; } WordIndex *end() { return end_; } std::size_t size() const { return end_ - begin_; } std::size_t Order() const { return end_ - begin_; } private: WordIndex *begin_, *end_; }; template <class PayloadT> class NGram : public NGramHeader { public: typedef PayloadT Payload; NGram() : NGramHeader(NULL, 0) {} NGram(void *begin, std::size_t order) : NGramHeader(begin, order) {} // Would do operator++ but that can get confusing for a stream. void NextInMemory() { ReBase(&Value() + 1); } static std::size_t TotalSize(std::size_t order) { return order * sizeof(WordIndex) + sizeof(Payload); } std::size_t TotalSize() const { // Compiler should optimize this. return TotalSize(Order()); } static std::size_t OrderFromSize(std::size_t size) { std::size_t ret = (size - sizeof(Payload)) / sizeof(WordIndex); assert(size == TotalSize(ret)); return ret; } const Payload &Value() const { return *reinterpret_cast<const Payload *>(end()); } Payload &Value() { return *reinterpret_cast<Payload *>(end()); } }; } // namespace lm #endif // LM_COMMON_NGRAM_H
0
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/models/glow_tts.py
import math from typing import Dict, List, Tuple, Union import torch from coqpit import Coqpit from torch import nn from torch.cuda.amp.autocast_mode import autocast from torch.nn import functional as F from TTS.tts.configs.glow_tts_config import GlowTTSConfig from TTS.tts.layers.glow_tts.decoder import Decoder from TTS.tts.layers.glow_tts.encoder import Encoder from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.text.tokenizer import TTSTokenizer from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.io import load_fsspec class GlowTTS(BaseTTS): """GlowTTS model. Paper:: https://arxiv.org/abs/2005.11129 Paper abstract:: Recently, text-to-speech (TTS) models such as FastSpeech and ParaNet have been proposed to generate mel-spectrograms from text in parallel. Despite the advantage, the parallel TTS models cannot be trained without guidance from autoregressive TTS models as their external aligners. In this work, we propose Glow-TTS, a flow-based generative model for parallel TTS that does not require any external aligner. By combining the properties of flows and dynamic programming, the proposed model searches for the most probable monotonic alignment between text and the latent representation of speech on its own. We demonstrate that enforcing hard monotonic alignments enables robust TTS, which generalizes to long utterances, and employing generative flows enables fast, diverse, and controllable speech synthesis. Glow-TTS obtains an order-of-magnitude speed-up over the autoregressive model, Tacotron 2, at synthesis with comparable speech quality. We further show that our model can be easily extended to a multi-speaker setting. Check :class:`TTS.tts.configs.glow_tts_config.GlowTTSConfig` for class arguments. Examples: Init only model layers. >>> from TTS.tts.configs.glow_tts_config import GlowTTSConfig >>> from TTS.tts.models.glow_tts import GlowTTS >>> config = GlowTTSConfig(num_chars=2) >>> model = GlowTTS(config) Fully init a model ready for action. All the class attributes and class members (e.g Tokenizer, AudioProcessor, etc.). are initialized internally based on config values. 
>>> from TTS.tts.configs.glow_tts_config import GlowTTSConfig >>> from TTS.tts.models.glow_tts import GlowTTS >>> config = GlowTTSConfig() >>> model = GlowTTS.init_from_config(config, verbose=False) """ def __init__( self, config: GlowTTSConfig, ap: "AudioProcessor" = None, tokenizer: "TTSTokenizer" = None, speaker_manager: SpeakerManager = None, ): super().__init__(config, ap, tokenizer, speaker_manager) # pass all config fields to `self` # for fewer code change self.config = config for key in config: setattr(self, key, config[key]) self.decoder_output_dim = config.out_channels # init multi-speaker layers if necessary self.init_multispeaker(config) self.run_data_dep_init = config.data_dep_init_steps > 0 self.encoder = Encoder( self.num_chars, out_channels=self.out_channels, hidden_channels=self.hidden_channels_enc, hidden_channels_dp=self.hidden_channels_dp, encoder_type=self.encoder_type, encoder_params=self.encoder_params, mean_only=self.mean_only, use_prenet=self.use_encoder_prenet, dropout_p_dp=self.dropout_p_dp, c_in_channels=self.c_in_channels, ) self.decoder = Decoder( self.out_channels, self.hidden_channels_dec, self.kernel_size_dec, self.dilation_rate, self.num_flow_blocks_dec, self.num_block_layers, dropout_p=self.dropout_p_dec, num_splits=self.num_splits, num_squeeze=self.num_squeeze, sigmoid_scale=self.sigmoid_scale, c_in_channels=self.c_in_channels, ) def init_multispeaker(self, config: Coqpit): """Init speaker embedding layer if `use_speaker_embedding` is True and set the expected speaker embedding vector dimension to the encoder layer channel size. If model uses d-vectors, then it only sets speaker embedding vector dimension to the d-vector dimension from the config. Args: config (Coqpit): Model configuration. """ self.embedded_speaker_dim = 0 # set number of speakers - if num_speakers is set in config, use it, otherwise use speaker_manager if self.speaker_manager is not None: self.num_speakers = self.speaker_manager.num_speakers # set ultimate speaker embedding size if config.use_d_vector_file: self.embedded_speaker_dim = ( config.d_vector_dim if "d_vector_dim" in config and config.d_vector_dim is not None else 512 ) if self.speaker_manager is not None: assert ( config.d_vector_dim == self.speaker_manager.embedding_dim ), " [!] d-vector dimension mismatch b/w config and speaker manager." 
# init speaker embedding layer if config.use_speaker_embedding and not config.use_d_vector_file: print(" > Init speaker_embedding layer.") self.embedded_speaker_dim = self.hidden_channels_enc self.emb_g = nn.Embedding(self.num_speakers, self.hidden_channels_enc) nn.init.uniform_(self.emb_g.weight, -0.1, 0.1) # set conditioning dimensions self.c_in_channels = self.embedded_speaker_dim @staticmethod def compute_outputs(attn, o_mean, o_log_scale, x_mask): """Compute and format the mode outputs with the given alignment map""" y_mean = torch.matmul(attn.squeeze(1).transpose(1, 2), o_mean.transpose(1, 2)).transpose( 1, 2 ) # [b, t', t], [b, t, d] -> [b, d, t'] y_log_scale = torch.matmul(attn.squeeze(1).transpose(1, 2), o_log_scale.transpose(1, 2)).transpose( 1, 2 ) # [b, t', t], [b, t, d] -> [b, d, t'] # compute total duration with adjustment o_attn_dur = torch.log(1 + torch.sum(attn, -1)) * x_mask return y_mean, y_log_scale, o_attn_dur def unlock_act_norm_layers(self): """Unlock activation normalization layers for data depended initalization.""" for f in self.decoder.flows: if getattr(f, "set_ddi", False): f.set_ddi(True) def lock_act_norm_layers(self): """Lock activation normalization layers.""" for f in self.decoder.flows: if getattr(f, "set_ddi", False): f.set_ddi(False) def _set_speaker_input(self, aux_input: Dict): if aux_input is None: d_vectors = None speaker_ids = None else: d_vectors = aux_input.get("d_vectors", None) speaker_ids = aux_input.get("speaker_ids", None) if d_vectors is not None and speaker_ids is not None: raise ValueError("[!] Cannot use d-vectors and speaker-ids together.") if speaker_ids is not None and not hasattr(self, "emb_g"): raise ValueError("[!] Cannot use speaker-ids without enabling speaker embedding.") g = speaker_ids if speaker_ids is not None else d_vectors return g def _speaker_embedding(self, aux_input: Dict) -> Union[torch.tensor, None]: g = self._set_speaker_input(aux_input) # speaker embedding if g is not None: if hasattr(self, "emb_g"): # use speaker embedding layer if not g.size(): # if is a scalar g = g.unsqueeze(0) # unsqueeze g = F.normalize(self.emb_g(g)).unsqueeze(-1) # [b, h, 1] else: # use d-vector g = F.normalize(g).unsqueeze(-1) # [b, h, 1] return g def forward( self, x, x_lengths, y, y_lengths=None, aux_input={"d_vectors": None, "speaker_ids": None} ): # pylint: disable=dangerous-default-value """ Args: x (torch.Tensor): Input text sequence ids. :math:`[B, T_en]` x_lengths (torch.Tensor): Lengths of input text sequences. :math:`[B]` y (torch.Tensor): Target mel-spectrogram frames. :math:`[B, T_de, C_mel]` y_lengths (torch.Tensor): Lengths of target mel-spectrogram frames. :math:`[B]` aux_input (Dict): Auxiliary inputs. `d_vectors` is speaker embedding vectors for a multi-speaker model. :math:`[B, D_vec]`. `speaker_ids` is speaker ids for a multi-speaker model usind speaker-embedding layer. :math:`B` Returns: Dict: - z: :math: `[B, T_de, C]` - logdet: :math:`B` - y_mean: :math:`[B, T_de, C]` - y_log_scale: :math:`[B, T_de, C]` - alignments: :math:`[B, T_en, T_de]` - durations_log: :math:`[B, T_en, 1]` - total_durations_log: :math:`[B, T_en, 1]` """ # [B, T, C] -> [B, C, T] y = y.transpose(1, 2) y_max_length = y.size(2) # norm speaker embeddings g = self._speaker_embedding(aux_input) # embedding pass o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # drop redisual frames wrt num_squeeze and set y_lengths. 
y, y_lengths, y_max_length, attn = self.preprocess(y, y_lengths, y_max_length, None) # create masks y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length), 1).to(x_mask.dtype) # [B, 1, T_en, T_de] attn_mask = torch.unsqueeze(x_mask, -1) * torch.unsqueeze(y_mask, 2) # decoder pass z, logdet = self.decoder(y, y_mask, g=g, reverse=False) # find the alignment path with torch.no_grad(): o_scale = torch.exp(-2 * o_log_scale) logp1 = torch.sum(-0.5 * math.log(2 * math.pi) - o_log_scale, [1]).unsqueeze(-1) # [b, t, 1] logp2 = torch.matmul(o_scale.transpose(1, 2), -0.5 * (z**2)) # [b, t, d] x [b, d, t'] = [b, t, t'] logp3 = torch.matmul((o_mean * o_scale).transpose(1, 2), z) # [b, t, d] x [b, d, t'] = [b, t, t'] logp4 = torch.sum(-0.5 * (o_mean**2) * o_scale, [1]).unsqueeze(-1) # [b, t, 1] logp = logp1 + logp2 + logp3 + logp4 # [b, t, t'] attn = maximum_path(logp, attn_mask.squeeze(1)).unsqueeze(1).detach() y_mean, y_log_scale, o_attn_dur = self.compute_outputs(attn, o_mean, o_log_scale, x_mask) attn = attn.squeeze(1).permute(0, 2, 1) outputs = { "z": z.transpose(1, 2), "logdet": logdet, "y_mean": y_mean.transpose(1, 2), "y_log_scale": y_log_scale.transpose(1, 2), "alignments": attn, "durations_log": o_dur_log.transpose(1, 2), "total_durations_log": o_attn_dur.transpose(1, 2), } return outputs @torch.no_grad() def inference_with_MAS( self, x, x_lengths, y=None, y_lengths=None, aux_input={"d_vectors": None, "speaker_ids": None} ): # pylint: disable=dangerous-default-value """ It's similar to the teacher forcing in Tacotron. It was proposed in: https://arxiv.org/abs/2104.05557 Shapes: - x: :math:`[B, T]` - x_lenghts: :math:`B` - y: :math:`[B, T, C]` - y_lengths: :math:`B` - g: :math:`[B, C] or B` """ y = y.transpose(1, 2) y_max_length = y.size(2) # norm speaker embeddings g = self._speaker_embedding(aux_input) # embedding pass o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # drop redisual frames wrt num_squeeze and set y_lengths. 
y, y_lengths, y_max_length, attn = self.preprocess(y, y_lengths, y_max_length, None) # create masks y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length), 1).to(x_mask.dtype) attn_mask = torch.unsqueeze(x_mask, -1) * torch.unsqueeze(y_mask, 2) # decoder pass z, logdet = self.decoder(y, y_mask, g=g, reverse=False) # find the alignment path between z and encoder output o_scale = torch.exp(-2 * o_log_scale) logp1 = torch.sum(-0.5 * math.log(2 * math.pi) - o_log_scale, [1]).unsqueeze(-1) # [b, t, 1] logp2 = torch.matmul(o_scale.transpose(1, 2), -0.5 * (z**2)) # [b, t, d] x [b, d, t'] = [b, t, t'] logp3 = torch.matmul((o_mean * o_scale).transpose(1, 2), z) # [b, t, d] x [b, d, t'] = [b, t, t'] logp4 = torch.sum(-0.5 * (o_mean**2) * o_scale, [1]).unsqueeze(-1) # [b, t, 1] logp = logp1 + logp2 + logp3 + logp4 # [b, t, t'] attn = maximum_path(logp, attn_mask.squeeze(1)).unsqueeze(1).detach() y_mean, y_log_scale, o_attn_dur = self.compute_outputs(attn, o_mean, o_log_scale, x_mask) attn = attn.squeeze(1).permute(0, 2, 1) # get predited aligned distribution z = y_mean * y_mask # reverse the decoder and predict using the aligned distribution y, logdet = self.decoder(z, y_mask, g=g, reverse=True) outputs = { "model_outputs": z.transpose(1, 2), "logdet": logdet, "y_mean": y_mean.transpose(1, 2), "y_log_scale": y_log_scale.transpose(1, 2), "alignments": attn, "durations_log": o_dur_log.transpose(1, 2), "total_durations_log": o_attn_dur.transpose(1, 2), } return outputs @torch.no_grad() def decoder_inference( self, y, y_lengths=None, aux_input={"d_vectors": None, "speaker_ids": None} ): # pylint: disable=dangerous-default-value """ Shapes: - y: :math:`[B, T, C]` - y_lengths: :math:`B` - g: :math:`[B, C] or B` """ y = y.transpose(1, 2) y_max_length = y.size(2) g = self._speaker_embedding(aux_input) y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length), 1).to(y.dtype) # decoder pass z, logdet = self.decoder(y, y_mask, g=g, reverse=False) # reverse decoder and predict y, logdet = self.decoder(z, y_mask, g=g, reverse=True) outputs = {} outputs["model_outputs"] = y.transpose(1, 2) outputs["logdet"] = logdet return outputs @torch.no_grad() def inference( self, x, aux_input={"x_lengths": None, "d_vectors": None, "speaker_ids": None} ): # pylint: disable=dangerous-default-value x_lengths = aux_input["x_lengths"] g = self._speaker_embedding(aux_input) # embedding pass o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # compute output durations w = (torch.exp(o_dur_log) - 1) * x_mask * self.length_scale w_ceil = torch.clamp_min(torch.ceil(w), 1) y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() y_max_length = None # compute masks y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length), 1).to(x_mask.dtype) attn_mask = torch.unsqueeze(x_mask, -1) * torch.unsqueeze(y_mask, 2) # compute attention mask attn = generate_path(w_ceil.squeeze(1), attn_mask.squeeze(1)).unsqueeze(1) y_mean, y_log_scale, o_attn_dur = self.compute_outputs(attn, o_mean, o_log_scale, x_mask) z = (y_mean + torch.exp(y_log_scale) * torch.randn_like(y_mean) * self.inference_noise_scale) * y_mask # decoder pass y, logdet = self.decoder(z, y_mask, g=g, reverse=True) attn = attn.squeeze(1).permute(0, 2, 1) outputs = { "model_outputs": y.transpose(1, 2), "logdet": logdet, "y_mean": y_mean.transpose(1, 2), "y_log_scale": y_log_scale.transpose(1, 2), "alignments": attn, "durations_log": o_dur_log.transpose(1, 2), "total_durations_log": o_attn_dur.transpose(1, 2), } return outputs def 
train_step(self, batch: dict, criterion: nn.Module): """A single training step. Forward pass and loss computation. Run data depended initialization for the first `config.data_dep_init_steps` steps. Args: batch (dict): [description] criterion (nn.Module): [description] """ text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] d_vectors = batch["d_vectors"] speaker_ids = batch["speaker_ids"] if self.run_data_dep_init and self.training: # compute data-dependent initialization of activation norm layers self.unlock_act_norm_layers() with torch.no_grad(): _ = self.forward( text_input, text_lengths, mel_input, mel_lengths, aux_input={"d_vectors": d_vectors, "speaker_ids": speaker_ids}, ) outputs = None loss_dict = None self.lock_act_norm_layers() else: # normal training step outputs = self.forward( text_input, text_lengths, mel_input, mel_lengths, aux_input={"d_vectors": d_vectors, "speaker_ids": speaker_ids}, ) with autocast(enabled=False): # avoid mixed_precision in criterion loss_dict = criterion( outputs["z"].float(), outputs["y_mean"].float(), outputs["y_log_scale"].float(), outputs["logdet"].float(), mel_lengths, outputs["durations_log"].float(), outputs["total_durations_log"].float(), text_lengths, ) return outputs, loss_dict def _create_logs(self, batch, outputs, ap): alignments = outputs["alignments"] text_input = batch["text_input"][:1] if batch["text_input"] is not None else None text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] d_vectors = batch["d_vectors"][:1] if batch["d_vectors"] is not None else None speaker_ids = batch["speaker_ids"][:1] if batch["speaker_ids"] is not None else None # model runs reverse flow to predict spectrograms pred_outputs = self.inference( text_input, aux_input={"x_lengths": text_lengths[:1], "d_vectors": d_vectors, "speaker_ids": speaker_ids}, ) model_outputs = pred_outputs["model_outputs"] pred_spec = model_outputs[0].data.cpu().numpy() gt_spec = mel_input[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { "prediction": plot_spectrogram(pred_spec, ap, output_fig=False), "ground_truth": plot_spectrogram(gt_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False), } # Sample audio train_audio = ap.inv_melspectrogram(pred_spec.T) return figures, {"audio": train_audio} def train_log( self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int ) -> None: # pylint: disable=no-self-use figures, audios = self._create_logs(batch, outputs, self.ap) logger.train_figures(steps, figures) logger.train_audios(steps, audios, self.ap.sample_rate) @torch.no_grad() def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: figures, audios = self._create_logs(batch, outputs, self.ap) logger.eval_figures(steps, figures) logger.eval_audios(steps, audios, self.ap.sample_rate) @torch.no_grad() def test_run(self, assets: Dict) -> Tuple[Dict, Dict]: """Generic test run for `tts` models used by `Trainer`. You can override this for a different behaviour. Returns: Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard. """ print(" | > Synthesizing test sentences.") test_audios = {} test_figures = {} test_sentences = self.config.test_sentences aux_inputs = self._get_test_aux_input() if len(test_sentences) == 0: print(" | [!] 
No test sentences provided.") else: for idx, sen in enumerate(test_sentences): outputs = synthesis( self, sen, self.config, "cuda" in str(next(self.parameters()).device), speaker_id=aux_inputs["speaker_id"], d_vector=aux_inputs["d_vector"], style_wav=aux_inputs["style_wav"], use_griffin_lim=True, do_trim_silence=False, ) test_audios["{}-audio".format(idx)] = outputs["wav"] test_figures["{}-prediction".format(idx)] = plot_spectrogram( outputs["outputs"]["model_outputs"], self.ap, output_fig=False ) test_figures["{}-alignment".format(idx)] = plot_alignment(outputs["alignments"], output_fig=False) return test_figures, test_audios def preprocess(self, y, y_lengths, y_max_length, attn=None): if y_max_length is not None: y_max_length = (y_max_length // self.num_squeeze) * self.num_squeeze y = y[:, :, :y_max_length] if attn is not None: attn = attn[:, :, :, :y_max_length] y_lengths = torch.div(y_lengths, self.num_squeeze, rounding_mode="floor") * self.num_squeeze return y, y_lengths, y_max_length, attn def store_inverse(self): self.decoder.store_inverse() def load_checkpoint( self, config, checkpoint_path, eval=False ): # pylint: disable=unused-argument, redefined-builtin state = load_fsspec(checkpoint_path, map_location=torch.device("cpu")) self.load_state_dict(state["model"]) if eval: self.eval() self.store_inverse() assert not self.training @staticmethod def get_criterion(): from TTS.tts.layers.losses import GlowTTSLoss # pylint: disable=import-outside-toplevel return GlowTTSLoss() def on_train_step_start(self, trainer): """Decide on every training step wheter enable/disable data depended initialization.""" self.run_data_dep_init = trainer.total_steps_done < self.data_dep_init_steps @staticmethod def init_from_config(config: "GlowTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ from TTS.utils.audio import AudioProcessor ap = AudioProcessor.init_from_config(config, verbose) tokenizer, new_config = TTSTokenizer.init_from_config(config) speaker_manager = SpeakerManager.init_from_config(config, samples) return GlowTTS(new_config, ap, tokenizer, speaker_manager)
0
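For a quick end-to-end check of the model class above, the `synthesis` helper it already imports can drive the reverse flow directly, mirroring what `test_run` does for its test sentences. The default config, the sentence, and the untrained weights below are assumptions for illustration; the output is only meaningful after `load_checkpoint` loads a trained model.

```python
# Inference sketch mirroring test_run() above; weights are random here, so the
# audio is noise unless load_checkpoint() is called with a trained model first.
import torch

from TTS.tts.configs.glow_tts_config import GlowTTSConfig
from TTS.tts.models.glow_tts import GlowTTS
from TTS.tts.utils.synthesis import synthesis

config = GlowTTSConfig()
model = GlowTTS.init_from_config(config, verbose=False)
model.eval()
model.store_inverse()  # cache inverted decoder weights for the reverse pass

with torch.no_grad():
    outputs = synthesis(
        model,
        "Hello world.",
        config,
        False,                  # use_cuda
        use_griffin_lim=True,   # vocode the predicted mel with Griffin-Lim
        do_trim_silence=False,
    )
print(outputs["wav"].shape)     # waveform from the reverse flow + Griffin-Lim
```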
coqui_public_repos/STT
coqui_public_repos/STT/notebooks/easy_transfer_learning.ipynb
## Install Coqui STT ! pip install -U pip ! pip install coqui_stt_training### Download pre-trained model import os import tarfile from coqui_stt_training.util.downloader import maybe_download def download_pretrained_model(): model_dir="english/" if not os.path.exists("english/coqui-yesno-checkpoints"): maybe_download("model.tar.gz", model_dir, "https://github.com/coqui-ai/STT-models/releases/download/english%2Fcoqui%2Fyesno-v0.0.1/coqui-yesno-checkpoints.tar.gz") print('\nNo extracted pre-trained model found. Extracting now...') tar = tarfile.open("english/model.tar.gz") tar.extractall("english/") tar.close() else: print('Found "english/coqui-yesno-checkpoints" - not extracting.') # Download + extract pre-trained English model download_pretrained_model()### Download sample data from coqui_stt_training.util.downloader import maybe_download def download_sample_data(): data_dir="russian/" maybe_download("ru.wav", data_dir, "https://raw.githubusercontent.com/coqui-ai/STT/main/data/smoke_test/russian_sample_data/ru.wav") maybe_download("ru.csv", data_dir, "https://raw.githubusercontent.com/coqui-ai/STT/main/data/smoke_test/russian_sample_data/ru.csv") maybe_download("alphabet.txt", data_dir, "https://raw.githubusercontent.com/coqui-ai/STT/main/data/smoke_test/russian_sample_data/alphabet.ru") # Download sample Russian data download_sample_data()from coqui_stt_training.util.config import initialize_globals_from_args initialize_globals_from_args( n_hidden=64, load_checkpoint_dir="english/coqui-yesno-checkpoints", save_checkpoint_dir="russian/checkpoints", drop_source_layers=1, alphabet_config_path="russian/alphabet.txt", train_files=["russian/ru.csv"], dev_files=["russian/ru.csv"], epochs=100, load_cudnn=True, )from coqui_stt_training.util.config import Config print(Config.to_json())from coqui_stt_training.train import train # use maximum one GPU os.environ["CUDA_VISIBLE_DEVICES"] = "0" train()from coqui_stt_training.util.config import Config Config.test_files=["russian/ru.csv"] Config.load_checkpoint_dir="russian/checkpoints"from coqui_stt_training.evaluate import test test()
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/script/concat.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_CONCAT_H_ #define FST_SCRIPT_CONCAT_H_ #include <utility> #include <fst/concat.h> #include <fst/script/fst-class.h> namespace fst { namespace script { using ConcatArgs1 = std::pair<MutableFstClass *, const FstClass &>; template <class Arc> void Concat(ConcatArgs1 *args) { MutableFst<Arc> *ofst = std::get<0>(*args)->GetMutableFst<Arc>(); const Fst<Arc> &ifst = *(std::get<1>(*args).GetFst<Arc>()); Concat(ofst, ifst); } using ConcatArgs2 = std::pair<const FstClass &, MutableFstClass *>; template <class Arc> void Concat(ConcatArgs2 *args) { const Fst<Arc> &ifst = *(std::get<0>(*args).GetFst<Arc>()); MutableFst<Arc> *ofst = std::get<1>(*args)->GetMutableFst<Arc>(); Concat(ifst, ofst); } void Concat(MutableFstClass *ofst, const FstClass &ifst); void Concat(const FstClass &ifst, MutableFstClass *ofst); } // namespace script } // namespace fst #endif // FST_SCRIPT_CONCAT_H_
0
coqui_public_repos/STT/native_client/kenlm
coqui_public_repos/STT/native_client/kenlm/util/spaces.cc
#include "spaces.hh" namespace util { // Sigh this is the only way I could come up with to do a _const_ bool. It has ' ', '\f', '\n', '\r', '\t', and '\v' (same as isspace on C locale). const bool kSpaces[256] = {0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; } // namespace util
0
coqui_public_repos/TTS/TTS/encoder
coqui_public_repos/TTS/TTS/encoder/models/lstm.py
import torch from torch import nn from TTS.encoder.models.base_encoder import BaseEncoder class LSTMWithProjection(nn.Module): def __init__(self, input_size, hidden_size, proj_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.proj_size = proj_size self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True) self.linear = nn.Linear(hidden_size, proj_size, bias=False) def forward(self, x): self.lstm.flatten_parameters() o, (_, _) = self.lstm(x) return self.linear(o) class LSTMWithoutProjection(nn.Module): def __init__(self, input_dim, lstm_dim, proj_dim, num_lstm_layers): super().__init__() self.lstm = nn.LSTM(input_size=input_dim, hidden_size=lstm_dim, num_layers=num_lstm_layers, batch_first=True) self.linear = nn.Linear(lstm_dim, proj_dim, bias=True) self.relu = nn.ReLU() def forward(self, x): _, (hidden, _) = self.lstm(x) return self.relu(self.linear(hidden[-1])) class LSTMSpeakerEncoder(BaseEncoder): def __init__( self, input_dim, proj_dim=256, lstm_dim=768, num_lstm_layers=3, use_lstm_with_projection=True, use_torch_spec=False, audio_config=None, ): super().__init__() self.use_lstm_with_projection = use_lstm_with_projection self.use_torch_spec = use_torch_spec self.audio_config = audio_config self.proj_dim = proj_dim layers = [] # choise LSTM layer if use_lstm_with_projection: layers.append(LSTMWithProjection(input_dim, lstm_dim, proj_dim)) for _ in range(num_lstm_layers - 1): layers.append(LSTMWithProjection(proj_dim, lstm_dim, proj_dim)) self.layers = nn.Sequential(*layers) else: self.layers = LSTMWithoutProjection(input_dim, lstm_dim, proj_dim, num_lstm_layers) self.instancenorm = nn.InstanceNorm1d(input_dim) if self.use_torch_spec: self.torch_spec = self.get_torch_mel_spectrogram_class(audio_config) else: self.torch_spec = None self._init_layers() def _init_layers(self): for name, param in self.layers.named_parameters(): if "bias" in name: nn.init.constant_(param, 0.0) elif "weight" in name: nn.init.xavier_normal_(param) def forward(self, x, l2_norm=True): """Forward pass of the model. Args: x (Tensor): Raw waveform signal or spectrogram frames. If input is a waveform, `torch_spec` must be `True` to compute the spectrogram on-the-fly. l2_norm (bool): Whether to L2-normalize the outputs. Shapes: - x: :math:`(N, 1, T_{in})` or :math:`(N, D_{spec}, T_{in})` """ with torch.no_grad(): with torch.cuda.amp.autocast(enabled=False): if self.use_torch_spec: x.squeeze_(1) x = self.torch_spec(x) x = self.instancenorm(x).transpose(1, 2) d = self.layers(x) if self.use_lstm_with_projection: d = d[:, -1] if l2_norm: d = torch.nn.functional.normalize(d, p=2, dim=1) return d
0
coqui_public_repos/snakepit/src
coqui_public_repos/snakepit/src/models/Node-model.js
const Parallel = require('async-parallel')
const Sequelize = require('sequelize')
const sequelize = require('./db.js')
const Resource = require('./Resource-model.js')
const lxd = require('../utils/lxd.js')
const config = require('../config.js')

var Node = sequelize.define('node', {
    id: {
        type: Sequelize.STRING,
        primaryKey: true
    },
    endpoint: {
        type: Sequelize.STRING,
        allowNull: false
    },
    password: {
        type: Sequelize.STRING,
        allowNull: true
    },
    online: {
        type: Sequelize.BOOLEAN,
        allowNull: false,
        defaultValue: false
    },
    available: {
        type: Sequelize.BOOLEAN,
        allowNull: false,
        defaultValue: false
    },
    since: {
        type: Sequelize.DATE,
        allowNull: false
    }
})

Node.hasMany(Resource, { onDelete: 'cascade' })
Resource.belongsTo(Node)

Node.beforeCreate(async node => {
    if (!(await Node.findOne({ where: { endpoint: node.endpoint } }))) {
        try {
            await lxd.post(node.endpoint, 'certificates', {
                type: 'client',
                password: node.password
            })
        } catch (ex) {
            if (!ex.response || !ex.response.data || !ex.response.data.error ||
                ex.response.data.error != 'Certificate already in trust store') {
                throw ex
            }
        }
    }
    delete node.password
})

Node.afterDestroy(async node => {
    if (node.endpoint != config.endpoint) {
        let certificates = await lxd.get(node.endpoint, 'certificates')
        certificates = certificates.map(c => {
            c = c.split('/')
            return c[c.length - 1]
        })
        await Parallel.each(certificates, async c => {
            let cpath = 'certificates/' + c
            let cinfo = await lxd.get(node.endpoint, cpath)
            if (cinfo.certificate == config.lxdCert) {
                await lxd.delete(node.endpoint, cpath)
            }
        })
    }
})

module.exports = Node
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/docker-build-base.tyml
$if: '(event.event != "push") && (event.event != "tag")'
then:
  taskId: ${taskcluster.taskId}
  provisionerId: ${taskcluster.docker.provisionerId}
  workerType: ${build.workerType}
  taskGroupId: ${taskcluster.taskGroupId}
  schedulerId: ${taskcluster.schedulerId}
  created: { $fromNow: '0 sec' }
  deadline: { $fromNow: '1 day' }
  expires: { $fromNow: '7 days' }

  payload:
    maxRunTime: { $eval: to_int(build.maxRunTime) }
    image: "ubuntu:14.04"

    features:
      dind: true

    env:
      DOCKER_API_VERSION: "1.18"

    command:
      - "/bin/bash"
      - "--login"
      - "-cxe"
      - $let:
          dockerfile: { $eval: strip(str(build.dockerfile)) }
        in: >
          apt-get -qq -y remove --purge ubuntu-advantage-tools &&
          apt-get -qq update &&
          apt-get -qq -y install git wget pkg-config apt-transport-https ca-certificates curl software-properties-common make &&
          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
          add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" &&
          apt-get -qq update &&
          apt-get -qq -y install docker-ce &&
          mkdir -p /opt/deepspeech &&
          git clone --quiet ${event.head.repo.url} /opt/deepspeech &&
          cd /opt/deepspeech &&
          git checkout --quiet ${event.head.sha} &&
          make ${dockerfile} DEEPSPEECH_REPO=${event.head.repo.url} DEEPSPEECH_SHA=${event.head.sha} &&
          docker build --file ${dockerfile} .

    artifacts:
      "public":
        type: "directory"
        path: "/tmp/artifacts/"
        expires: { $fromNow: '7 days' }

  metadata:
    name: ${build.metadata.name}
    description: ${build.metadata.description}
    owner: ${event.head.user.email}
    source: ${event.head.repo.url}
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/aux_tests/test_speaker_manager.py
import os
import unittest

import numpy as np
import torch
from trainer.io import save_checkpoint

from tests import get_tests_input_path
from TTS.config import load_config
from TTS.encoder.utils.generic_utils import setup_encoder_model
from TTS.tts.utils.speakers import SpeakerManager
from TTS.utils.audio import AudioProcessor

encoder_config_path = os.path.join(get_tests_input_path(), "test_speaker_encoder_config.json")
encoder_model_path = os.path.join(get_tests_input_path(), "checkpoint_0.pth")
sample_wav_path = os.path.join(get_tests_input_path(), "../data/ljspeech/wavs/LJ001-0001.wav")
sample_wav_path2 = os.path.join(get_tests_input_path(), "../data/ljspeech/wavs/LJ001-0002.wav")
d_vectors_file_path = os.path.join(get_tests_input_path(), "../data/dummy_speakers.json")
d_vectors_file_pth_path = os.path.join(get_tests_input_path(), "../data/dummy_speakers.pth")


class SpeakerManagerTest(unittest.TestCase):
    """Test SpeakerManager for loading embedding files and computing d_vectors from waveforms"""

    @staticmethod
    def test_speaker_embedding():
        # load config
        config = load_config(encoder_config_path)
        config.audio.resample = True

        # create a dummy speaker encoder
        model = setup_encoder_model(config)
        save_checkpoint(config, model, None, None, 0, 0, get_tests_input_path())

        # load audio processor and speaker encoder
        ap = AudioProcessor(**config.audio)
        manager = SpeakerManager(encoder_model_path=encoder_model_path, encoder_config_path=encoder_config_path)

        # load a sample audio and compute embedding
        waveform = ap.load_wav(sample_wav_path)
        mel = ap.melspectrogram(waveform)
        d_vector = manager.compute_embeddings(mel)
        assert d_vector.shape[1] == 256

        # compute d_vector directly from an input file
        d_vector = manager.compute_embedding_from_clip(sample_wav_path)
        d_vector2 = manager.compute_embedding_from_clip(sample_wav_path)
        d_vector = torch.FloatTensor(d_vector)
        d_vector2 = torch.FloatTensor(d_vector2)
        assert d_vector.shape[0] == 256
        assert (d_vector - d_vector2).sum() == 0.0

        # compute d_vector from a list of wav files.
        d_vector3 = manager.compute_embedding_from_clip([sample_wav_path, sample_wav_path2])
        d_vector3 = torch.FloatTensor(d_vector3)
        assert d_vector3.shape[0] == 256
        assert (d_vector - d_vector3).sum() != 0.0

        # remove dummy model
        os.remove(encoder_model_path)

    def test_dvector_file_processing(self):
        manager = SpeakerManager(d_vectors_file_path=d_vectors_file_path)
        self.assertEqual(manager.num_speakers, 1)
        self.assertEqual(manager.embedding_dim, 256)

        manager = SpeakerManager(d_vectors_file_path=d_vectors_file_pth_path)
        self.assertEqual(manager.num_speakers, 1)
        self.assertEqual(manager.embedding_dim, 256)

        d_vector = manager.get_embedding_by_clip(manager.clip_ids[0])
        assert len(d_vector) == 256

        d_vectors = manager.get_embeddings_by_name(manager.speaker_names[0])
        assert len(d_vectors[0]) == 256

        d_vector1 = manager.get_mean_embedding(manager.speaker_names[0], num_samples=2, randomize=True)
        assert len(d_vector1) == 256

        d_vector2 = manager.get_mean_embedding(manager.speaker_names[0], num_samples=2, randomize=False)
        assert len(d_vector2) == 256
        assert np.sum(np.array(d_vector1) - np.array(d_vector2)) != 0
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/map.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/script/fst-class.h>
#include <fst/script/map.h>
#include <fst/script/script-impl.h>

namespace fst {
namespace script {

FstClass *Map(const FstClass &ifst, MapType map_type, float delta,
              double power, const WeightClass &weight) {
  if (!ifst.WeightTypesMatch(weight, "Map")) return nullptr;
  MapInnerArgs iargs(ifst, map_type, delta, power, weight);
  MapArgs args(iargs);
  Apply<Operation<MapArgs>>("Map", ifst.ArcType(), &args);
  return args.retval;
}

REGISTER_FST_OPERATION(Map, StdArc, MapArgs);
REGISTER_FST_OPERATION(Map, LogArc, MapArgs);
REGISTER_FST_OPERATION(Map, Log64Arc, MapArgs);

}  // namespace script
}  // namespace fst
0
coqui_public_repos/stt-model-manager
coqui_public_repos/stt-model-manager/config/getHttpsConfig.js
'use strict';

const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const chalk = require('react-dev-utils/chalk');
const paths = require('./paths');

// Ensure the certificate and key provided are valid and if not
// throw an easy to debug error
function validateKeyAndCerts({ cert, key, keyFile, crtFile }) {
  let encrypted;
  try {
    // publicEncrypt will throw an error with an invalid cert
    encrypted = crypto.publicEncrypt(cert, Buffer.from('test'));
  } catch (err) {
    throw new Error(
      `The certificate "${chalk.yellow(crtFile)}" is invalid.\n${err.message}`
    );
  }

  try {
    // privateDecrypt will throw an error with an invalid key
    crypto.privateDecrypt(key, encrypted);
  } catch (err) {
    throw new Error(
      `The certificate key "${chalk.yellow(keyFile)}" is invalid.\n${
        err.message
      }`
    );
  }
}

// Read file and throw an error if it doesn't exist
function readEnvFile(file, type) {
  if (!fs.existsSync(file)) {
    throw new Error(
      `You specified ${chalk.cyan(
        type
      )} in your env, but the file "${chalk.yellow(file)}" can't be found.`
    );
  }
  return fs.readFileSync(file);
}

// Get the https config
// Return cert files if provided in env, otherwise just true or false
function getHttpsConfig() {
  const { SSL_CRT_FILE, SSL_KEY_FILE, HTTPS } = process.env;
  const isHttps = HTTPS === 'true';

  if (isHttps && SSL_CRT_FILE && SSL_KEY_FILE) {
    const crtFile = path.resolve(paths.appPath, SSL_CRT_FILE);
    const keyFile = path.resolve(paths.appPath, SSL_KEY_FILE);
    const config = {
      cert: readEnvFile(crtFile, 'SSL_CRT_FILE'),
      key: readEnvFile(keyFile, 'SSL_KEY_FILE'),
    };

    validateKeyAndCerts({ ...config, keyFile, crtFile });
    return config;
  }
  return isHttps;
}

module.exports = getHttpsConfig;
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/pdt/getters.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_EXTENSIONS_PDT_GETTERS_H_
#define FST_EXTENSIONS_PDT_GETTERS_H_

#include <string>

#include <fst/extensions/pdt/compose.h>
#include <fst/extensions/pdt/replace.h>

namespace fst {
namespace script {

bool GetPdtComposeFilter(const string &str, PdtComposeFilter *cf);

bool GetPdtParserType(const string &str, PdtParserType *pt);

}  // namespace script
}  // namespace fst

#endif  // FST_EXTENSIONS_PDT_GETTERS_H_
0
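The two declarations above follow a common script-level convention: a getter parses a user-facing string into an enum value and returns false on unrecognized input so the caller can report a usage error. The sketch below shows that convention with placeholder names; DemoFilter and GetDemoFilter are hypothetical, and the real PdtComposeFilter enumerators are defined in <fst/extensions/pdt/compose.h>.

// Illustrative only: the string-to-enum "getter" convention the declarations
// above follow. DemoFilter and GetDemoFilter are hypothetical stand-ins.
#include <iostream>
#include <string>

enum DemoFilter { DEMO_EXPAND_FILTER, DEMO_PAREN_FILTER };

// Returns false (leaving *filter untouched) when the name is not recognized,
// so callers can emit a usage error instead of proceeding with a bad value.
bool GetDemoFilter(const std::string &str, DemoFilter *filter) {
  if (str == "expand") {
    *filter = DEMO_EXPAND_FILTER;
    return true;
  } else if (str == "paren") {
    *filter = DEMO_PAREN_FILTER;
    return true;
  }
  return false;
}

int main() {
  DemoFilter f;
  std::cout << (GetDemoFilter("expand", &f) ? "ok" : "unknown") << std::endl;
  return 0;
}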
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/platform/tracing.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <windows.h>
#include <TraceLoggingProvider.h>

TRACELOGGING_DECLARE_PROVIDER(telemetry_provider_handle);
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/far/sttable.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fstream>

#include <fst/extensions/far/sttable.h>

namespace fst {

bool IsSTTable(const string &filename) {
  std::ifstream strm(filename);
  if (!strm.good()) return false;
  int32 magic_number = 0;
  ReadType(strm, &magic_number);
  return magic_number == kSTTableMagicNumber;
}

}  // namespace fst
0
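IsSTTable above only peeks at a magic number at the start of the file to decide whether it looks like an STTable. The sketch below isolates that check pattern; kDemoMagic, HasMagic, and the driver are hypothetical stand-ins rather than the real kSTTableMagicNumber value or OpenFST's ReadType helper.

// Illustrative only: the generic "check a binary file's magic number" pattern
// that IsSTTable follows. kDemoMagic and HasMagic are hypothetical.
#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>

namespace demo {

constexpr int32_t kDemoMagic = 0x53545442;  // arbitrary placeholder value

bool HasMagic(const std::string &filename) {
  std::ifstream strm(filename, std::ios::binary);
  if (!strm.good()) return false;
  int32_t magic = 0;
  // Raw read of the leading 4 bytes, mirroring what a ReadType-style helper does.
  strm.read(reinterpret_cast<char *>(&magic), sizeof(magic));
  return strm.good() && magic == kDemoMagic;
}

}  // namespace demo

int main(int argc, char **argv) {
  if (argc > 1) {
    std::cout << (demo::HasMagic(argv[1]) ? "match" : "no match") << std::endl;
  }
  return 0;
}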
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/compact/compact64_string-fst.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/fst.h>
#include <fst/compact-fst.h>

namespace fst {

static FstRegisterer<CompactStringFst<StdArc, uint64>>
    CompactStringFst_StdArc_uint64_registerer;

static FstRegisterer<CompactStringFst<LogArc, uint64>>
    CompactStringFst_LogArc_uint64_registerer;

}  // namespace fst
0
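The translation unit above contains no functions; its only job is to define file-scope FstRegisterer objects whose constructors run during static initialization and add the CompactStringFst variants to a registry keyed by type name, so the types can later be instantiated by name when an FST is read from disk. A stripped-down, self-contained sketch of that idiom follows; Registry, Registerer, Thing, and CompactDemo are hypothetical stand-ins, not OpenFST classes.

// Illustrative only: a simplified version of the static-registration idiom
// that FstRegisterer relies on. All names here are hypothetical.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Thing {
  virtual ~Thing() = default;
  virtual std::string Name() const = 0;
};

// Global name -> factory map, populated before main() by static initializers.
std::map<std::string, std::function<std::unique_ptr<Thing>()>> &Registry() {
  static std::map<std::string, std::function<std::unique_ptr<Thing>()>> r;
  return r;
}

template <class T>
struct Registerer {
  explicit Registerer(const std::string &name) {
    Registry()[name] = [] { return std::unique_ptr<Thing>(new T()); };
  }
};

struct CompactDemo : Thing {
  std::string Name() const override { return "compact64_string"; }
};

// Analogue of the file-scope registerer objects in the translation unit above.
static Registerer<CompactDemo> compact_demo_registerer("compact64_string");

int main() {
  auto it = Registry().find("compact64_string");
  if (it != Registry().end()) {
    std::cout << "constructed: " << it->second()->Name() << std::endl;
  }
  return 0;
}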
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/script/shortest-distance.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_SHORTEST_DISTANCE_H_ #define FST_SCRIPT_SHORTEST_DISTANCE_H_ #include <tuple> #include <vector> #include <fst/queue.h> #include <fst/shortest-distance.h> #include <fst/script/fst-class.h> #include <fst/script/prune.h> #include <fst/script/script-impl.h> #include <fst/script/weight-class.h> namespace fst { namespace script { enum ArcFilterType { ANY_ARC_FILTER, EPSILON_ARC_FILTER, INPUT_EPSILON_ARC_FILTER, OUTPUT_EPSILON_ARC_FILTER }; struct ShortestDistanceOptions { const QueueType queue_type; const ArcFilterType arc_filter_type; const int64_t source; const float delta; ShortestDistanceOptions(QueueType queue_type, ArcFilterType arc_filter_type, int64_t source, float delta) : queue_type(queue_type), arc_filter_type(arc_filter_type), source(source), delta(delta) {} }; namespace internal { // Code to implement switching on queue and arc filter types. template <class Arc, class Queue, class ArcFilter> struct QueueConstructor { using Weight = typename Arc::Weight; static Queue *Construct(const Fst<Arc> &, const std::vector<Weight> *) { return new Queue(); } }; // Specializations to support queues with different constructors. template <class Arc, class ArcFilter> struct QueueConstructor<Arc, AutoQueue<typename Arc::StateId>, ArcFilter> { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; // template<class Arc, class ArcFilter> static AutoQueue<StateId> *Construct(const Fst<Arc> &fst, const std::vector<Weight> *distance) { return new AutoQueue<StateId>(fst, distance, ArcFilter()); } }; template <class Arc, class ArcFilter> struct QueueConstructor< Arc, NaturalShortestFirstQueue<typename Arc::StateId, typename Arc::Weight>, ArcFilter> { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; static NaturalShortestFirstQueue<StateId, Weight> *Construct( const Fst<Arc> &, const std::vector<Weight> *distance) { return new NaturalShortestFirstQueue<StateId, Weight>(*distance); } }; template <class Arc, class ArcFilter> struct QueueConstructor<Arc, TopOrderQueue<typename Arc::StateId>, ArcFilter> { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; static TopOrderQueue<StateId> *Construct(const Fst<Arc> &fst, const std::vector<Weight> *) { return new TopOrderQueue<StateId>(fst, ArcFilter()); } }; template <class Arc, class Queue, class ArcFilter> void ShortestDistance(const Fst<Arc> &fst, std::vector<typename Arc::Weight> *distance, const ShortestDistanceOptions &opts) { std::unique_ptr<Queue> queue( QueueConstructor<Arc, Queue, ArcFilter>::Construct(fst, distance)); const fst::ShortestDistanceOptions<Arc, Queue, ArcFilter> sopts( queue.get(), ArcFilter(), opts.source, opts.delta); ShortestDistance(fst, distance, sopts); } template <class Arc, class Queue> void ShortestDistance(const Fst<Arc> &fst, std::vector<typename Arc::Weight> *distance, const ShortestDistanceOptions &opts) { switch (opts.arc_filter_type) { case ANY_ARC_FILTER: { ShortestDistance<Arc, Queue, AnyArcFilter<Arc>>(fst, distance, opts); return; } case EPSILON_ARC_FILTER: { ShortestDistance<Arc, Queue, EpsilonArcFilter<Arc>>(fst, distance, opts); return; } case INPUT_EPSILON_ARC_FILTER: { ShortestDistance<Arc, Queue, InputEpsilonArcFilter<Arc>>(fst, distance, opts); return; } case OUTPUT_EPSILON_ARC_FILTER: { ShortestDistance<Arc, Queue, OutputEpsilonArcFilter<Arc>>(fst, distance, opts); return; } default: { FSTERROR() << "ShortestDistance: 
Unknown arc filter type: " << opts.arc_filter_type; distance->clear(); distance->resize(1, Arc::Weight::NoWeight()); return; } } } } // namespace internal using ShortestDistanceArgs1 = std::tuple<const FstClass &, std::vector<WeightClass> *, const ShortestDistanceOptions &>; template <class Arc> void ShortestDistance(ShortestDistanceArgs1 *args) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>()); const auto &opts = std::get<2>(*args); std::vector<Weight> typed_distance; switch (opts.queue_type) { case AUTO_QUEUE: { internal::ShortestDistance<Arc, AutoQueue<StateId>>(fst, &typed_distance, opts); break; } case FIFO_QUEUE: { internal::ShortestDistance<Arc, FifoQueue<StateId>>(fst, &typed_distance, opts); break; } case LIFO_QUEUE: { internal::ShortestDistance<Arc, LifoQueue<StateId>>(fst, &typed_distance, opts); break; } case SHORTEST_FIRST_QUEUE: { internal::ShortestDistance<Arc, NaturalShortestFirstQueue<StateId, Weight>>( fst, &typed_distance, opts); break; } case STATE_ORDER_QUEUE: { internal::ShortestDistance<Arc, StateOrderQueue<StateId>>( fst, &typed_distance, opts); break; } case TOP_ORDER_QUEUE: { internal::ShortestDistance<Arc, TopOrderQueue<StateId>>( fst, &typed_distance, opts); break; } default: { FSTERROR() << "ShortestDistance: Unknown queue type: " << opts.queue_type; typed_distance.clear(); typed_distance.resize(1, Arc::Weight::NoWeight()); break; } } internal::CopyWeights(typed_distance, std::get<1>(*args)); } using ShortestDistanceArgs2 = std::tuple<const FstClass &, std::vector<WeightClass> *, bool, double>; template <class Arc> void ShortestDistance(ShortestDistanceArgs2 *args) { using Weight = typename Arc::Weight; const Fst<Arc> &fst = *(std::get<0>(*args).GetFst<Arc>()); std::vector<Weight> typed_distance; ShortestDistance(fst, &typed_distance, std::get<2>(*args), std::get<3>(*args)); internal::CopyWeights(typed_distance, std::get<1>(*args)); } void ShortestDistance(const FstClass &fst, std::vector<WeightClass> *distance, const ShortestDistanceOptions &opts); void ShortestDistance(const FstClass &ifst, std::vector<WeightClass> *distance, bool reverse = false, double delta = fst::kShortestDelta); } // namespace script } // namespace fst #endif // FST_SCRIPT_SHORTEST_DISTANCE_H_
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/replace.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions and classes for the recursive replacement of FSTs. #ifndef FST_REPLACE_H_ #define FST_REPLACE_H_ #include <set> #include <string> #include <unordered_map> #include <utility> #include <vector> #include <fst/log.h> #include <fst/cache.h> #include <fst/expanded-fst.h> #include <fst/fst-decl.h> // For optional argument declarations. #include <fst/fst.h> #include <fst/matcher.h> #include <fst/replace-util.h> #include <fst/state-table.h> #include <fst/test-properties.h> namespace fst { // Replace state tables have the form: // // template <class Arc, class P> // class ReplaceStateTable { // public: // using Label = typename Arc::Label Label; // using StateId = typename Arc::StateId; // // using PrefixId = P; // using StateTuple = ReplaceStateTuple<StateId, PrefixId>; // using StackPrefix = ReplaceStackPrefix<Label, StateId>; // // // Required constructor. // ReplaceStateTable( // const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_list, // Label root); // // // Required copy constructor that does not copy state. // ReplaceStateTable(const ReplaceStateTable<Arc, PrefixId> &table); // // // Looks up state ID by tuple, adding it if it doesn't exist. // StateId FindState(const StateTuple &tuple); // // // Looks up state tuple by ID. // const StateTuple &Tuple(StateId id) const; // // // Lookus up prefix ID by stack prefix, adding it if it doesn't exist. // PrefixId FindPrefixId(const StackPrefix &stack_prefix); // // // Looks up stack prefix by ID. // const StackPrefix &GetStackPrefix(PrefixId id) const; // }; // Tuple that uniquely defines a state in replace. template <class S, class P> struct ReplaceStateTuple { using StateId = S; using PrefixId = P; ReplaceStateTuple(PrefixId prefix_id = -1, StateId fst_id = kNoStateId, StateId fst_state = kNoStateId) : prefix_id(prefix_id), fst_id(fst_id), fst_state(fst_state) {} PrefixId prefix_id; // Index in prefix table. StateId fst_id; // Current FST being walked. StateId fst_state; // Current state in FST being walked (not to be // confused with the thse StateId of the combined FST). }; // Equality of replace state tuples. template <class StateId, class PrefixId> inline bool operator==(const ReplaceStateTuple<StateId, PrefixId> &x, const ReplaceStateTuple<StateId, PrefixId> &y) { return x.prefix_id == y.prefix_id && x.fst_id == y.fst_id && x.fst_state == y.fst_state; } // Functor returning true for tuples corresponding to states in the root FST. template <class StateId, class PrefixId> class ReplaceRootSelector { public: bool operator()(const ReplaceStateTuple<StateId, PrefixId> &tuple) const { return tuple.prefix_id == 0; } }; // Functor for fingerprinting replace state tuples. template <class StateId, class PrefixId> class ReplaceFingerprint { public: explicit ReplaceFingerprint(const std::vector<uint64> *size_array) : size_array_(size_array) {} uint64 operator()(const ReplaceStateTuple<StateId, PrefixId> &tuple) const { return tuple.prefix_id * size_array_->back() + size_array_->at(tuple.fst_id - 1) + tuple.fst_state; } private: const std::vector<uint64> *size_array_; }; // Useful when the fst_state uniquely define the tuple. template <class StateId, class PrefixId> class ReplaceFstStateFingerprint { public: uint64 operator()(const ReplaceStateTuple<StateId, PrefixId> &tuple) const { return tuple.fst_state; } }; // A generic hash function for replace state tuples. 
template <typename S, typename P> class ReplaceHash { public: size_t operator()(const ReplaceStateTuple<S, P>& t) const { static constexpr size_t prime0 = 7853; static constexpr size_t prime1 = 7867; return t.prefix_id + t.fst_id * prime0 + t.fst_state * prime1; } }; // Container for stack prefix. template <class Label, class StateId> class ReplaceStackPrefix { public: struct PrefixTuple { PrefixTuple(Label fst_id = kNoLabel, StateId nextstate = kNoStateId) : fst_id(fst_id), nextstate(nextstate) {} Label fst_id; StateId nextstate; }; ReplaceStackPrefix() {} ReplaceStackPrefix(const ReplaceStackPrefix &other) : prefix_(other.prefix_) {} void Push(StateId fst_id, StateId nextstate) { prefix_.push_back(PrefixTuple(fst_id, nextstate)); } void Pop() { prefix_.pop_back(); } const PrefixTuple &Top() const { return prefix_[prefix_.size() - 1]; } size_t Depth() const { return prefix_.size(); } public: std::vector<PrefixTuple> prefix_; }; // Equality stack prefix classes. template <class Label, class StateId> inline bool operator==(const ReplaceStackPrefix<Label, StateId> &x, const ReplaceStackPrefix<Label, StateId> &y) { if (x.prefix_.size() != y.prefix_.size()) return false; for (size_t i = 0; i < x.prefix_.size(); ++i) { if (x.prefix_[i].fst_id != y.prefix_[i].fst_id || x.prefix_[i].nextstate != y.prefix_[i].nextstate) { return false; } } return true; } // Hash function for stack prefix to prefix id. template <class Label, class StateId> class ReplaceStackPrefixHash { public: size_t operator()(const ReplaceStackPrefix<Label, StateId> &prefix) const { size_t sum = 0; for (const auto &pair : prefix.prefix_) { static constexpr size_t prime = 7863; sum += pair.fst_id + pair.nextstate * prime; } return sum; } }; // Replace state tables. // A two-level state table for replace. Warning: calls CountStates to compute // the number of states of each component FST. 
template <class Arc, class P = ssize_t> class VectorHashReplaceStateTable { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using PrefixId = P; using StateTuple = ReplaceStateTuple<StateId, PrefixId>; using StateTable = VectorHashStateTable<ReplaceStateTuple<StateId, PrefixId>, ReplaceRootSelector<StateId, PrefixId>, ReplaceFstStateFingerprint<StateId, PrefixId>, ReplaceFingerprint<StateId, PrefixId>>; using StackPrefix = ReplaceStackPrefix<Label, StateId>; using StackPrefixTable = CompactHashBiTable<PrefixId, StackPrefix, ReplaceStackPrefixHash<Label, StateId>>; VectorHashReplaceStateTable( const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_list, Label root) : root_size_(0) { size_array_.push_back(0); for (const auto &fst_pair : fst_list) { if (fst_pair.first == root) { root_size_ = CountStates(*(fst_pair.second)); size_array_.push_back(size_array_.back()); } else { size_array_.push_back(size_array_.back() + CountStates(*(fst_pair.second))); } } state_table_.reset( new StateTable(new ReplaceRootSelector<StateId, PrefixId>, new ReplaceFstStateFingerprint<StateId, PrefixId>, new ReplaceFingerprint<StateId, PrefixId>(&size_array_), root_size_, root_size_ + size_array_.back())); } VectorHashReplaceStateTable( const VectorHashReplaceStateTable<Arc, PrefixId> &table) : root_size_(table.root_size_), size_array_(table.size_array_), prefix_table_(table.prefix_table_) { state_table_.reset( new StateTable(new ReplaceRootSelector<StateId, PrefixId>, new ReplaceFstStateFingerprint<StateId, PrefixId>, new ReplaceFingerprint<StateId, PrefixId>(&size_array_), root_size_, root_size_ + size_array_.back())); } StateId FindState(const StateTuple &tuple) { return state_table_->FindState(tuple); } const StateTuple &Tuple(StateId id) const { return state_table_->Tuple(id); } PrefixId FindPrefixId(const StackPrefix &prefix) { return prefix_table_.FindId(prefix); } const StackPrefix& GetStackPrefix(PrefixId id) const { return prefix_table_.FindEntry(id); } private: StateId root_size_; std::vector<uint64> size_array_; std::unique_ptr<StateTable> state_table_; StackPrefixTable prefix_table_; }; // Default replace state table. template <class Arc, class P /* = size_t */> class DefaultReplaceStateTable : public CompactHashStateTable<ReplaceStateTuple<typename Arc::StateId, P>, ReplaceHash<typename Arc::StateId, P>> { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using PrefixId = P; using StateTuple = ReplaceStateTuple<StateId, PrefixId>; using StateTable = CompactHashStateTable<StateTuple, ReplaceHash<StateId, PrefixId>>; using StackPrefix = ReplaceStackPrefix<Label, StateId>; using StackPrefixTable = CompactHashBiTable<PrefixId, StackPrefix, ReplaceStackPrefixHash<Label, StateId>>; using StateTable::FindState; using StateTable::Tuple; DefaultReplaceStateTable( const std::vector<std::pair<Label, const Fst<Arc> *>> &, Label) {} DefaultReplaceStateTable(const DefaultReplaceStateTable<Arc, PrefixId> &table) : StateTable(), prefix_table_(table.prefix_table_) {} PrefixId FindPrefixId(const StackPrefix &prefix) { return prefix_table_.FindId(prefix); } const StackPrefix &GetStackPrefix(PrefixId id) const { return prefix_table_.FindEntry(id); } private: StackPrefixTable prefix_table_; }; // By default ReplaceFst will copy the input label of the replace arc. 
// The call_label_type and return_label_type options specify how to manage // the labels of the call arc and the return arc of the replace FST template <class Arc, class StateTable = DefaultReplaceStateTable<Arc>, class CacheStore = DefaultCacheStore<Arc>> struct ReplaceFstOptions : CacheImplOptions<CacheStore> { using Label = typename Arc::Label; // Index of root rule for expansion. Label root; // How to label call arc. ReplaceLabelType call_label_type = REPLACE_LABEL_INPUT; // How to label return arc. ReplaceLabelType return_label_type = REPLACE_LABEL_NEITHER; // Specifies output label to put on call arc; if kNoLabel, use existing label // on call arc. Otherwise, use this field as the output label. Label call_output_label = kNoLabel; // Specifies label to put on return arc. Label return_label = 0; // Take ownership of input FSTs? bool take_ownership = false; // Pointer to optional pre-constructed state table. StateTable *state_table = nullptr; explicit ReplaceFstOptions(const CacheImplOptions<CacheStore> &opts, Label root = kNoLabel) : CacheImplOptions<CacheStore>(opts), root(root) {} explicit ReplaceFstOptions(const CacheOptions &opts, Label root = kNoLabel) : CacheImplOptions<CacheStore>(opts), root(root) {} // FIXME(kbg): There are too many constructors here. Come up with a consistent // position for call_output_label (probably the very end) so that it is // possible to express all the remaining constructors with a single // default-argument constructor. Also move clients off of the "backwards // compatibility" constructor, for good. explicit ReplaceFstOptions(Label root) : root(root) {} explicit ReplaceFstOptions(Label root, ReplaceLabelType call_label_type, ReplaceLabelType return_label_type, Label return_label) : root(root), call_label_type(call_label_type), return_label_type(return_label_type), return_label(return_label) {} explicit ReplaceFstOptions(Label root, ReplaceLabelType call_label_type, ReplaceLabelType return_label_type, Label call_output_label, Label return_label) : root(root), call_label_type(call_label_type), return_label_type(return_label_type), call_output_label(call_output_label), return_label(return_label) {} explicit ReplaceFstOptions(const ReplaceUtilOptions &opts) : ReplaceFstOptions(opts.root, opts.call_label_type, opts.return_label_type, opts.return_label) {} ReplaceFstOptions() : root(kNoLabel) {} // For backwards compatibility. ReplaceFstOptions(int64 root, bool epsilon_replace_arc) : root(root), call_label_type(epsilon_replace_arc ? REPLACE_LABEL_NEITHER : REPLACE_LABEL_INPUT), call_output_label(epsilon_replace_arc ? 0 : kNoLabel) {} }; // Forward declaration. template <class Arc, class StateTable, class CacheStore> class ReplaceFstMatcher; template <class Arc> using FstList = std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>>; // Returns true if label type on arc results in epsilon input label. inline bool EpsilonOnInput(ReplaceLabelType label_type) { return label_type == REPLACE_LABEL_NEITHER || label_type == REPLACE_LABEL_OUTPUT; } // Returns true if label type on arc results in epsilon input label. inline bool EpsilonOnOutput(ReplaceLabelType label_type) { return label_type == REPLACE_LABEL_NEITHER || label_type == REPLACE_LABEL_INPUT; } // Returns true if for either the call or return arc ilabel != olabel. 
template <class Label> bool ReplaceTransducer(ReplaceLabelType call_label_type, ReplaceLabelType return_label_type, Label call_output_label) { return call_label_type == REPLACE_LABEL_INPUT || call_label_type == REPLACE_LABEL_OUTPUT || (call_label_type == REPLACE_LABEL_BOTH && call_output_label != kNoLabel) || return_label_type == REPLACE_LABEL_INPUT || return_label_type == REPLACE_LABEL_OUTPUT; } template <class Arc> uint64 ReplaceFstProperties(typename Arc::Label root_label, const FstList<Arc> &fst_list, ReplaceLabelType call_label_type, ReplaceLabelType return_label_type, typename Arc::Label call_output_label, bool *sorted_and_non_empty) { using Label = typename Arc::Label; std::vector<uint64> inprops; bool all_ilabel_sorted = true; bool all_olabel_sorted = true; bool all_non_empty = true; // All nonterminals are negative? bool all_negative = true; // All nonterminals are positive and form a dense range containing 1? bool dense_range = true; Label root_fst_idx = 0; for (Label i = 0; i < fst_list.size(); ++i) { const auto label = fst_list[i].first; if (label >= 0) all_negative = false; if (label > fst_list.size() || label <= 0) dense_range = false; if (label == root_label) root_fst_idx = i; const auto *fst = fst_list[i].second; if (fst->Start() == kNoStateId) all_non_empty = false; if (!fst->Properties(kILabelSorted, false)) all_ilabel_sorted = false; if (!fst->Properties(kOLabelSorted, false)) all_olabel_sorted = false; inprops.push_back(fst->Properties(kCopyProperties, false)); } const auto props = ReplaceProperties( inprops, root_fst_idx, EpsilonOnInput(call_label_type), EpsilonOnInput(return_label_type), EpsilonOnOutput(call_label_type), EpsilonOnOutput(return_label_type), ReplaceTransducer(call_label_type, return_label_type, call_output_label), all_non_empty, all_ilabel_sorted, all_olabel_sorted, all_negative || dense_range); const bool sorted = props & (kILabelSorted | kOLabelSorted); *sorted_and_non_empty = all_non_empty && sorted; return props; } namespace internal { // The replace implementation class supports a dynamic expansion of a recursive // transition network represented as label/FST pairs with dynamic replacable // arcs. 
template <class Arc, class StateTable, class CacheStore> class ReplaceFstImpl : public CacheBaseImpl<typename CacheStore::State, CacheStore> { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using State = typename CacheStore::State; using CacheImpl = CacheBaseImpl<State, CacheStore>; using PrefixId = typename StateTable::PrefixId; using StateTuple = ReplaceStateTuple<StateId, PrefixId>; using StackPrefix = ReplaceStackPrefix<Label, StateId>; using NonTerminalHash = std::unordered_map<Label, Label>; using FstImpl<Arc>::SetType; using FstImpl<Arc>::SetProperties; using FstImpl<Arc>::WriteHeader; using FstImpl<Arc>::SetInputSymbols; using FstImpl<Arc>::SetOutputSymbols; using FstImpl<Arc>::InputSymbols; using FstImpl<Arc>::OutputSymbols; using CacheImpl::PushArc; using CacheImpl::HasArcs; using CacheImpl::HasFinal; using CacheImpl::HasStart; using CacheImpl::SetArcs; using CacheImpl::SetFinal; using CacheImpl::SetStart; friend class ReplaceFstMatcher<Arc, StateTable, CacheStore>; ReplaceFstImpl(const FstList<Arc> &fst_list, const ReplaceFstOptions<Arc, StateTable, CacheStore> &opts) : CacheImpl(opts), call_label_type_(opts.call_label_type), return_label_type_(opts.return_label_type), call_output_label_(opts.call_output_label), return_label_(opts.return_label), state_table_(opts.state_table ? opts.state_table : new StateTable(fst_list, opts.root)) { SetType("replace"); // If the label is epsilon, then all replace label options are equivalent, // so we set the label types to NEITHER for simplicity. if (call_output_label_ == 0) call_label_type_ = REPLACE_LABEL_NEITHER; if (return_label_ == 0) return_label_type_ = REPLACE_LABEL_NEITHER; if (!fst_list.empty()) { SetInputSymbols(fst_list[0].second->InputSymbols()); SetOutputSymbols(fst_list[0].second->OutputSymbols()); } fst_array_.push_back(nullptr); for (Label i = 0; i < fst_list.size(); ++i) { const auto label = fst_list[i].first; const auto *fst = fst_list[i].second; nonterminal_hash_[label] = fst_array_.size(); nonterminal_set_.insert(label); fst_array_.emplace_back(opts.take_ownership ? fst : fst->Copy()); if (i) { if (!CompatSymbols(InputSymbols(), fst->InputSymbols())) { FSTERROR() << "ReplaceFstImpl: Input symbols of FST " << i << " do not match input symbols of base FST (0th FST)"; SetProperties(kError, kError); } if (!CompatSymbols(OutputSymbols(), fst->OutputSymbols())) { FSTERROR() << "ReplaceFstImpl: Output symbols of FST " << i << " do not match output symbols of base FST (0th FST)"; SetProperties(kError, kError); } } } const auto nonterminal = nonterminal_hash_[opts.root]; if ((nonterminal == 0) && (fst_array_.size() > 1)) { FSTERROR() << "ReplaceFstImpl: No FST corresponding to root label " << opts.root << " in the input tuple vector"; SetProperties(kError, kError); } root_ = (nonterminal > 0) ? nonterminal : 1; bool all_non_empty_and_sorted = false; SetProperties(ReplaceFstProperties(opts.root, fst_list, call_label_type_, return_label_type_, call_output_label_, &all_non_empty_and_sorted)); // Enables optional caching as long as sorted and all non-empty. always_cache_ = !all_non_empty_and_sorted; VLOG(2) << "ReplaceFstImpl::ReplaceFstImpl: always_cache = " << (always_cache_ ? 
"true" : "false"); } ReplaceFstImpl(const ReplaceFstImpl &impl) : CacheImpl(impl), call_label_type_(impl.call_label_type_), return_label_type_(impl.return_label_type_), call_output_label_(impl.call_output_label_), return_label_(impl.return_label_), always_cache_(impl.always_cache_), state_table_(new StateTable(*(impl.state_table_))), nonterminal_set_(impl.nonterminal_set_), nonterminal_hash_(impl.nonterminal_hash_), root_(impl.root_) { SetType("replace"); SetProperties(impl.Properties(), kCopyProperties); SetInputSymbols(impl.InputSymbols()); SetOutputSymbols(impl.OutputSymbols()); fst_array_.reserve(impl.fst_array_.size()); fst_array_.emplace_back(nullptr); for (Label i = 1; i < impl.fst_array_.size(); ++i) { fst_array_.emplace_back(impl.fst_array_[i]->Copy(true)); } } // Computes the dependency graph of the replace class and returns // true if the dependencies are cyclic. Cyclic dependencies will result // in an un-expandable FST. bool CyclicDependencies() const { const ReplaceUtilOptions opts(root_); ReplaceUtil<Arc> replace_util(fst_array_, nonterminal_hash_, opts); return replace_util.CyclicDependencies(); } StateId Start() { if (!HasStart()) { if (fst_array_.size() == 1) { SetStart(kNoStateId); return kNoStateId; } else { const auto fst_start = fst_array_[root_]->Start(); if (fst_start == kNoStateId) return kNoStateId; const auto prefix = GetPrefixId(StackPrefix()); const auto start = state_table_->FindState(StateTuple(prefix, root_, fst_start)); SetStart(start); return start; } } else { return CacheImpl::Start(); } } Weight Final(StateId s) { if (HasFinal(s)) return CacheImpl::Final(s); const auto &tuple = state_table_->Tuple(s); auto weight = Weight::Zero(); if (tuple.prefix_id == 0) { const auto fst_state = tuple.fst_state; weight = fst_array_[tuple.fst_id]->Final(fst_state); } if (always_cache_ || HasArcs(s)) SetFinal(s, weight); return weight; } size_t NumArcs(StateId s) { if (HasArcs(s)) { return CacheImpl::NumArcs(s); } else if (always_cache_) { // If always caching, expands and caches state. Expand(s); return CacheImpl::NumArcs(s); } else { // Otherwise computes the number of arcs without expanding. const auto tuple = state_table_->Tuple(s); if (tuple.fst_state == kNoStateId) return 0; auto num_arcs = fst_array_[tuple.fst_id]->NumArcs(tuple.fst_state); if (ComputeFinalArc(tuple, nullptr)) ++num_arcs; return num_arcs; } } // Returns whether a given label is a non-terminal. bool IsNonTerminal(Label label) const { if (label < *nonterminal_set_.begin() || label > *nonterminal_set_.rbegin()) { return false; } else { return nonterminal_hash_.count(label); } // TODO(allauzen): be smarter and take advantage of all_dense or // all_negative. Also use this in ComputeArc. This would require changes to // Replace so that recursing into an empty FST lead to a non co-accessible // state instead of deleting the arc as done currently. The current use // correct, since labels are sorted if all_non_empty is true. } size_t NumInputEpsilons(StateId s) { if (HasArcs(s)) { return CacheImpl::NumInputEpsilons(s); } else if (always_cache_ || !Properties(kILabelSorted)) { // If always caching or if the number of input epsilons is too expensive // to compute without caching (i.e., not ilabel-sorted), then expands and // caches state. Expand(s); return CacheImpl::NumInputEpsilons(s); } else { // Otherwise, computes the number of input epsilons without caching. 
const auto tuple = state_table_->Tuple(s); if (tuple.fst_state == kNoStateId) return 0; size_t num = 0; if (!EpsilonOnInput(call_label_type_)) { // If EpsilonOnInput(c) is false, all input epsilon arcs // are also input epsilons arcs in the underlying machine. num = fst_array_[tuple.fst_id]->NumInputEpsilons(tuple.fst_state); } else { // Otherwise, one need to consider that all non-terminal arcs // in the underlying machine also become input epsilon arc. ArcIterator<Fst<Arc>> aiter(*fst_array_[tuple.fst_id], tuple.fst_state); for (; !aiter.Done() && ((aiter.Value().ilabel == 0) || IsNonTerminal(aiter.Value().olabel)); aiter.Next()) { ++num; } } if (EpsilonOnInput(return_label_type_) && ComputeFinalArc(tuple, nullptr)) { ++num; } return num; } } size_t NumOutputEpsilons(StateId s) { if (HasArcs(s)) { return CacheImpl::NumOutputEpsilons(s); } else if (always_cache_ || !Properties(kOLabelSorted)) { // If always caching or if the number of output epsilons is too expensive // to compute without caching (i.e., not olabel-sorted), then expands and // caches state. Expand(s); return CacheImpl::NumOutputEpsilons(s); } else { // Otherwise, computes the number of output epsilons without caching. const auto tuple = state_table_->Tuple(s); if (tuple.fst_state == kNoStateId) return 0; size_t num = 0; if (!EpsilonOnOutput(call_label_type_)) { // If EpsilonOnOutput(c) is false, all output epsilon arcs are also // output epsilons arcs in the underlying machine. num = fst_array_[tuple.fst_id]->NumOutputEpsilons(tuple.fst_state); } else { // Otherwise, one need to consider that all non-terminal arcs in the // underlying machine also become output epsilon arc. ArcIterator<Fst<Arc>> aiter(*fst_array_[tuple.fst_id], tuple.fst_state); for (; !aiter.Done() && ((aiter.Value().olabel == 0) || IsNonTerminal(aiter.Value().olabel)); aiter.Next()) { ++num; } } if (EpsilonOnOutput(return_label_type_) && ComputeFinalArc(tuple, nullptr)) { ++num; } return num; } } uint64 Properties() const override { return Properties(kFstProperties); } // Sets error if found, and returns other FST impl properties. uint64 Properties(uint64 mask) const override { if (mask & kError) { for (Label i = 1; i < fst_array_.size(); ++i) { if (fst_array_[i]->Properties(kError, false)) { SetProperties(kError, kError); } } } return FstImpl<Arc>::Properties(mask); } // Returns the base arc iterator, and if arcs have not been computed yet, // extends and recurses for new arcs. void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) { if (!HasArcs(s)) Expand(s); CacheImpl::InitArcIterator(s, data); // TODO(allauzen): Set behaviour of generic iterator. // Warning: ArcIterator<ReplaceFst<A>>::InitCache() relies on current // behaviour. } // Extends current state (walk arcs one level deep). void Expand(StateId s) { const auto tuple = state_table_->Tuple(s); if (tuple.fst_state == kNoStateId) { // Local FST is empty. SetArcs(s); return; } ArcIterator<Fst<Arc>> aiter(*fst_array_[tuple.fst_id], tuple.fst_state); Arc arc; // Creates a final arc when needed. if (ComputeFinalArc(tuple, &arc)) PushArc(s, arc); // Expands all arcs leaving the state. for (; !aiter.Done(); aiter.Next()) { if (ComputeArc(tuple, aiter.Value(), &arc)) PushArc(s, arc); } SetArcs(s); } void Expand(StateId s, const StateTuple &tuple, const ArcIteratorData<Arc> &data) { if (tuple.fst_state == kNoStateId) { // Local FST is empty. SetArcs(s); return; } ArcIterator<Fst<Arc>> aiter(data); Arc arc; // Creates a final arc when needed. 
if (ComputeFinalArc(tuple, &arc)) AddArc(s, arc); // Expands all arcs leaving the state. for (; !aiter.Done(); aiter.Next()) { if (ComputeArc(tuple, aiter.Value(), &arc)) AddArc(s, arc); } SetArcs(s); } // If acpp is null, only returns true if a final arcp is required, but does // not actually compute it. bool ComputeFinalArc(const StateTuple &tuple, Arc *arcp, uint32 flags = kArcValueFlags) { const auto fst_state = tuple.fst_state; if (fst_state == kNoStateId) return false; // If state is final, pops the stack. if (fst_array_[tuple.fst_id]->Final(fst_state) != Weight::Zero() && tuple.prefix_id) { if (arcp) { arcp->ilabel = (EpsilonOnInput(return_label_type_)) ? 0 : return_label_; arcp->olabel = (EpsilonOnOutput(return_label_type_)) ? 0 : return_label_; if (flags & kArcNextStateValue) { const auto &stack = state_table_->GetStackPrefix(tuple.prefix_id); const auto prefix_id = PopPrefix(stack); const auto &top = stack.Top(); arcp->nextstate = state_table_->FindState( StateTuple(prefix_id, top.fst_id, top.nextstate)); } if (flags & kArcWeightValue) { arcp->weight = fst_array_[tuple.fst_id]->Final(fst_state); } } return true; } else { return false; } } // Computes an arc in the FST corresponding to one in the underlying machine. // Returns false if the underlying arc corresponds to no arc in the resulting // FST. bool ComputeArc(const StateTuple &tuple, const Arc &arc, Arc *arcp, uint32 flags = kArcValueFlags) { if (!EpsilonOnInput(call_label_type_) && (flags == (flags & (kArcILabelValue | kArcWeightValue)))) { *arcp = arc; return true; } if (arc.olabel == 0 || arc.olabel < *nonterminal_set_.begin() || arc.olabel > *nonterminal_set_.rbegin()) { // Expands local FST. const auto nextstate = flags & kArcNextStateValue ? state_table_->FindState( StateTuple(tuple.prefix_id, tuple.fst_id, arc.nextstate)) : kNoStateId; *arcp = Arc(arc.ilabel, arc.olabel, arc.weight, nextstate); } else { // Checks for non-terminal. const auto it = nonterminal_hash_.find(arc.olabel); if (it != nonterminal_hash_.end()) { // Recurses into non-terminal. const auto nonterminal = it->second; const auto nt_prefix = PushPrefix(state_table_->GetStackPrefix(tuple.prefix_id), tuple.fst_id, arc.nextstate); // If the start state is valid, replace; othewise, the arc is implicitly // deleted. const auto nt_start = fst_array_[nonterminal]->Start(); if (nt_start != kNoStateId) { const auto nt_nextstate = flags & kArcNextStateValue ? state_table_->FindState(StateTuple( nt_prefix, nonterminal, nt_start)) : kNoStateId; const auto ilabel = (EpsilonOnInput(call_label_type_)) ? 0 : arc.ilabel; const auto olabel = (EpsilonOnOutput(call_label_type_)) ? 0 : ((call_output_label_ == kNoLabel) ? arc.olabel : call_output_label_); *arcp = Arc(ilabel, olabel, arc.weight, nt_nextstate); } else { return false; } } else { const auto nextstate = flags & kArcNextStateValue ? state_table_->FindState( StateTuple(tuple.prefix_id, tuple.fst_id, arc.nextstate)) : kNoStateId; *arcp = Arc(arc.ilabel, arc.olabel, arc.weight, nextstate); } } return true; } // Returns the arc iterator flags supported by this FST. 
uint32 ArcIteratorFlags() const { uint32 flags = kArcValueFlags; if (!always_cache_) flags |= kArcNoCache; return flags; } StateTable *GetStateTable() const { return state_table_.get(); } const Fst<Arc> *GetFst(Label fst_id) const { return fst_array_[fst_id].get(); } Label GetFstId(Label nonterminal) const { const auto it = nonterminal_hash_.find(nonterminal); if (it == nonterminal_hash_.end()) { FSTERROR() << "ReplaceFstImpl::GetFstId: Nonterminal not found: " << nonterminal; } return it->second; } // Returns true if label type on call arc results in epsilon input label. bool EpsilonOnCallInput() { return EpsilonOnInput(call_label_type_); } private: // The unique index into stack prefix table. PrefixId GetPrefixId(const StackPrefix &prefix) { return state_table_->FindPrefixId(prefix); } // The prefix ID after a stack pop. PrefixId PopPrefix(StackPrefix prefix) { prefix.Pop(); return GetPrefixId(prefix); } // The prefix ID after a stack push. PrefixId PushPrefix(StackPrefix prefix, Label fst_id, StateId nextstate) { prefix.Push(fst_id, nextstate); return GetPrefixId(prefix); } // Runtime options ReplaceLabelType call_label_type_; // How to label call arc. ReplaceLabelType return_label_type_; // How to label return arc. int64 call_output_label_; // Specifies output label to put on call arc int64 return_label_; // Specifies label to put on return arc. bool always_cache_; // Disable optional caching of arc iterator? // State table. std::unique_ptr<StateTable> state_table_; // Replace components. std::set<Label> nonterminal_set_; NonTerminalHash nonterminal_hash_; std::vector<std::unique_ptr<const Fst<Arc>>> fst_array_; Label root_; }; } // namespace internal // // ReplaceFst supports dynamic replacement of arcs in one FST with another FST. // This replacement is recursive. ReplaceFst can be used to support a variety of // delayed constructions such as recursive // transition networks, union, or closure. It is constructed with an array of // FST(s). One FST represents the root (or topology) machine. The root FST // refers to other FSTs by recursively replacing arcs labeled as non-terminals // with the matching non-terminal FST. Currently the ReplaceFst uses the output // symbols of the arcs to determine whether the arc is a non-terminal arc or // not. A non-terminal can be any label that is not a non-zero terminal label in // the output alphabet. // // Note that the constructor uses a vector of pairs. These correspond to the // tuple of non-terminal Label and corresponding FST. For example to implement // the closure operation we need 2 FSTs. The first root FST is a single // self-loop arc on the start state. // // The ReplaceFst class supports an optionally caching arc iterator. // // The ReplaceFst needs to be built such that it is known to be ilabel- or // olabel-sorted (see usage below). // // Observe that Matcher<Fst<A>> will use the optionally caching arc iterator // when available (the FST is ilabel-sorted and matching on the input, or the // FST is olabel -orted and matching on the output). In order to obtain the // most efficient behaviour, it is recommended to set call_label_type to // REPLACE_LABEL_INPUT or REPLACE_LABEL_BOTH and return_label_type to // REPLACE_LABEL_OUTPUT or REPLACE_LABEL_NEITHER. This means that the call arc // does not have epsilon on the input side and the return arc has epsilon on the // input side) and matching on the input side. // // This class attaches interface to implementation and handles reference // counting, delegating most methods to ImplToFst. 
template <class A, class T /* = DefaultReplaceStateTable<A> */, class CacheStore /* = DefaultCacheStore<A> */> class ReplaceFst : public ImplToFst<internal::ReplaceFstImpl<A, T, CacheStore>> { public: using Arc = A; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using StateTable = T; using Store = CacheStore; using State = typename CacheStore::State; using Impl = internal::ReplaceFstImpl<Arc, StateTable, CacheStore>; using CacheImpl = internal::CacheBaseImpl<State, CacheStore>; using ImplToFst<Impl>::Properties; friend class ArcIterator<ReplaceFst<Arc, StateTable, CacheStore>>; friend class StateIterator<ReplaceFst<Arc, StateTable, CacheStore>>; friend class ReplaceFstMatcher<Arc, StateTable, CacheStore>; ReplaceFst(const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_array, Label root) : ImplToFst<Impl>(std::make_shared<Impl>( fst_array, ReplaceFstOptions<Arc, StateTable, CacheStore>(root))) {} ReplaceFst(const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_array, const ReplaceFstOptions<Arc, StateTable, CacheStore> &opts) : ImplToFst<Impl>(std::make_shared<Impl>(fst_array, opts)) {} // See Fst<>::Copy() for doc. ReplaceFst(const ReplaceFst<Arc, StateTable, CacheStore> &fst, bool safe = false) : ImplToFst<Impl>(fst, safe) {} // Get a copy of this ReplaceFst. See Fst<>::Copy() for further doc. ReplaceFst<Arc, StateTable, CacheStore> *Copy( bool safe = false) const override { return new ReplaceFst<Arc, StateTable, CacheStore>(*this, safe); } inline void InitStateIterator(StateIteratorData<Arc> *data) const override; void InitArcIterator(StateId s, ArcIteratorData<Arc> *data) const override { GetMutableImpl()->InitArcIterator(s, data); } MatcherBase<Arc> *InitMatcher(MatchType match_type) const override { if ((GetImpl()->ArcIteratorFlags() & kArcNoCache) && ((match_type == MATCH_INPUT && Properties(kILabelSorted, false)) || (match_type == MATCH_OUTPUT && Properties(kOLabelSorted, false)))) { return new ReplaceFstMatcher<Arc, StateTable, CacheStore> (this, match_type); } else { VLOG(2) << "Not using replace matcher"; return nullptr; } } bool CyclicDependencies() const { return GetImpl()->CyclicDependencies(); } const StateTable &GetStateTable() const { return *GetImpl()->GetStateTable(); } const Fst<Arc> &GetFst(Label nonterminal) const { return *GetImpl()->GetFst(GetImpl()->GetFstId(nonterminal)); } private: using ImplToFst<Impl>::GetImpl; using ImplToFst<Impl>::GetMutableImpl; ReplaceFst &operator=(const ReplaceFst &) = delete; }; // Specialization for ReplaceFst. template <class Arc, class StateTable, class CacheStore> class StateIterator<ReplaceFst<Arc, StateTable, CacheStore>> : public CacheStateIterator<ReplaceFst<Arc, StateTable, CacheStore>> { public: explicit StateIterator(const ReplaceFst<Arc, StateTable, CacheStore> &fst) : CacheStateIterator<ReplaceFst<Arc, StateTable, CacheStore>>( fst, fst.GetMutableImpl()) {} }; // Specialization for ReplaceFst, implementing optional caching. It is be used // as follows: // // ReplaceFst<A> replace; // ArcIterator<ReplaceFst<A>> aiter(replace, s); // // Note: ArcIterator< Fst<A>> is always a caching arc iterator. // aiter.SetFlags(kArcNoCache, kArcNoCache); // // Uses the arc iterator, no arc will be cached, no state will be expanded. // // Arc flags can be used to decide which component of the arc need to be // computed. // aiter.SetFlags(kArcILabelValue, kArcValueFlags); // // Wants the ilabel for this arc. // aiter.Value(); // Does not compute the destination state. 
// aiter.Next(); // aiter.SetFlags(kArcNextStateValue, kArcNextStateValue); // // Wants the ilabel and next state for this arc. // aiter.Value(); // Does compute the destination state and inserts it // // in the replace state table. // // No additional arcs have been cached at this point. template <class Arc, class StateTable, class CacheStore> class ArcIterator<ReplaceFst<Arc, StateTable, CacheStore>> { public: using StateId = typename Arc::StateId; using StateTuple = typename StateTable::StateTuple; ArcIterator(const ReplaceFst<Arc, StateTable, CacheStore> &fst, StateId s) : fst_(fst), s_(s), pos_(0), offset_(0), flags_(kArcValueFlags), arcs_(nullptr), data_flags_(0), final_flags_(0) { cache_data_.ref_count = nullptr; local_data_.ref_count = nullptr; // If FST does not support optional caching, forces caching. if (!(fst_.GetImpl()->ArcIteratorFlags() & kArcNoCache) && !(fst_.GetImpl()->HasArcs(s_))) { fst_.GetMutableImpl()->Expand(s_); } // If state is already cached, use cached arcs array. if (fst_.GetImpl()->HasArcs(s_)) { (fst_.GetImpl()) ->internal::template CacheBaseImpl< typename CacheStore::State, CacheStore>::InitArcIterator(s_, &cache_data_); num_arcs_ = cache_data_.narcs; arcs_ = cache_data_.arcs; // arcs_ is a pointer to the cached arcs. data_flags_ = kArcValueFlags; // All the arc member values are valid. } else { // Otherwise delay decision until Value() is called. tuple_ = fst_.GetImpl()->GetStateTable()->Tuple(s_); if (tuple_.fst_state == kNoStateId) { num_arcs_ = 0; } else { // The decision to cache or not to cache has been defered until Value() // or // SetFlags() is called. However, the arc iterator is set up now to be // ready for non-caching in order to keep the Value() method simple and // efficient. const auto *rfst = fst_.GetImpl()->GetFst(tuple_.fst_id); rfst->InitArcIterator(tuple_.fst_state, &local_data_); // arcs_ is a pointer to the arcs in the underlying machine. arcs_ = local_data_.arcs; // Computes the final arc (but not its destination state) if a final arc // is required. bool has_final_arc = fst_.GetMutableImpl()->ComputeFinalArc( tuple_, &final_arc_, kArcValueFlags & ~kArcNextStateValue); // Sets the arc value flags that hold for final_arc_. final_flags_ = kArcValueFlags & ~kArcNextStateValue; // Computes the number of arcs. num_arcs_ = local_data_.narcs; if (has_final_arc) ++num_arcs_; // Sets the offset between the underlying arc positions and the // positions // in the arc iterator. offset_ = num_arcs_ - local_data_.narcs; // Defers the decision to cache or not until Value() or SetFlags() is // called. data_flags_ = 0; } } } ~ArcIterator() { if (cache_data_.ref_count) --(*cache_data_.ref_count); if (local_data_.ref_count) --(*local_data_.ref_count); } void ExpandAndCache() const { // TODO(allauzen): revisit this. // fst_.GetImpl()->Expand(s_, tuple_, local_data_); // (fst_.GetImpl())->CacheImpl<A>*>::InitArcIterator(s_, // &cache_data_); // fst_.InitArcIterator(s_, &cache_data_); // Expand and cache state. arcs_ = cache_data_.arcs; // arcs_ is a pointer to the cached arcs. data_flags_ = kArcValueFlags; // All the arc member values are valid. offset_ = 0; // No offset. } void Init() { if (flags_ & kArcNoCache) { // If caching is disabled // arcs_ is a pointer to the arcs in the underlying machine. arcs_ = local_data_.arcs; // Sets the arcs value flags that hold for arcs_. 
data_flags_ = kArcWeightValue; if (!fst_.GetMutableImpl()->EpsilonOnCallInput()) { data_flags_ |= kArcILabelValue; } // Sets the offset between the underlying arc positions and the positions // in the arc iterator. offset_ = num_arcs_ - local_data_.narcs; } else { ExpandAndCache(); } } bool Done() const { return pos_ >= num_arcs_; } const Arc &Value() const { // If data_flags_ is 0, non-caching was not requested. if (!data_flags_) { // TODO(allauzen): Revisit this. if (flags_ & kArcNoCache) { // Should never happen. FSTERROR() << "ReplaceFst: Inconsistent arc iterator flags"; } ExpandAndCache(); } if (pos_ - offset_ >= 0) { // The requested arc is not the final arc. const auto &arc = arcs_[pos_ - offset_]; if ((data_flags_ & flags_) == (flags_ & kArcValueFlags)) { // If the value flags match the recquired value flags then returns the // arc. return arc; } else { // Otherwise, compute the corresponding arc on-the-fly. fst_.GetMutableImpl()->ComputeArc(tuple_, arc, &arc_, flags_ & kArcValueFlags); return arc_; } } else { // The requested arc is the final arc. if ((final_flags_ & flags_) != (flags_ & kArcValueFlags)) { // If the arc value flags that hold for the final arc do not match the // requested value flags, then // final_arc_ needs to be updated. fst_.GetMutableImpl()->ComputeFinalArc(tuple_, &final_arc_, flags_ & kArcValueFlags); final_flags_ = flags_ & kArcValueFlags; } return final_arc_; } } void Next() { ++pos_; } size_t Position() const { return pos_; } void Reset() { pos_ = 0; } void Seek(size_t pos) { pos_ = pos; } uint32 Flags() const { return flags_; } void SetFlags(uint32 flags, uint32 mask) { // Updates the flags taking into account what flags are supported // by the FST. flags_ &= ~mask; flags_ |= (flags & fst_.GetImpl()->ArcIteratorFlags()); // If non-caching is not requested (and caching has not already been // performed), then flush data_flags_ to request caching during the next // call to Value(). if (!(flags_ & kArcNoCache) && data_flags_ != kArcValueFlags) { if (!fst_.GetImpl()->HasArcs(s_)) data_flags_ = 0; } // If data_flags_ has been flushed but non-caching is requested before // calling Value(), then set up the iterator for non-caching. if ((flags & kArcNoCache) && (!data_flags_)) Init(); } private: const ReplaceFst<Arc, StateTable, CacheStore> &fst_; // Reference to the FST. StateId s_; // State in the FST. mutable StateTuple tuple_; // Tuple corresponding to state_. ssize_t pos_; // Current position. mutable ssize_t offset_; // Offset between position in iterator and in arcs_. ssize_t num_arcs_; // Number of arcs at state_. uint32 flags_; // Behavorial flags for the arc iterator mutable Arc arc_; // Memory to temporarily store computed arcs. mutable ArcIteratorData<Arc> cache_data_; // Arc iterator data in cache. mutable ArcIteratorData<Arc> local_data_; // Arc iterator data in local FST. mutable const Arc *arcs_; // Array of arcs. mutable uint32 data_flags_; // Arc value flags valid for data in arcs_. mutable Arc final_arc_; // Final arc (when required). mutable uint32 final_flags_; // Arc value flags valid for final_arc_. 
ArcIterator(const ArcIterator &) = delete; ArcIterator &operator=(const ArcIterator &) = delete; }; template <class Arc, class StateTable, class CacheStore> class ReplaceFstMatcher : public MatcherBase<Arc> { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using FST = ReplaceFst<Arc, StateTable, CacheStore>; using LocalMatcher = MultiEpsMatcher<Matcher<Fst<Arc>>>; using StateTuple = typename StateTable::StateTuple; // This makes a copy of the FST. ReplaceFstMatcher(const ReplaceFst<Arc, StateTable, CacheStore> &fst, MatchType match_type) : owned_fst_(fst.Copy()), fst_(*owned_fst_), impl_(fst_.GetMutableImpl()), s_(fst::kNoStateId), match_type_(match_type), current_loop_(false), final_arc_(false), loop_(kNoLabel, 0, Weight::One(), kNoStateId) { if (match_type_ == fst::MATCH_OUTPUT) { std::swap(loop_.ilabel, loop_.olabel); } InitMatchers(); } // This doesn't copy the FST. ReplaceFstMatcher(const ReplaceFst<Arc, StateTable, CacheStore> *fst, MatchType match_type) : fst_(*fst), impl_(fst_.GetMutableImpl()), s_(fst::kNoStateId), match_type_(match_type), current_loop_(false), final_arc_(false), loop_(kNoLabel, 0, Weight::One(), kNoStateId) { if (match_type_ == fst::MATCH_OUTPUT) { std::swap(loop_.ilabel, loop_.olabel); } InitMatchers(); } // This makes a copy of the FST. ReplaceFstMatcher( const ReplaceFstMatcher<Arc, StateTable, CacheStore> &matcher, bool safe = false) : owned_fst_(matcher.fst_.Copy(safe)), fst_(*owned_fst_), impl_(fst_.GetMutableImpl()), s_(fst::kNoStateId), match_type_(matcher.match_type_), current_loop_(false), final_arc_(false), loop_(fst::kNoLabel, 0, Weight::One(), fst::kNoStateId) { if (match_type_ == fst::MATCH_OUTPUT) { std::swap(loop_.ilabel, loop_.olabel); } InitMatchers(); } // Creates a local matcher for each component FST in the RTN. LocalMatcher is // a multi-epsilon wrapper matcher. MultiEpsilonMatcher is used to match each // non-terminal arc, since these non-terminal // turn into epsilons on recursion. void InitMatchers() { const auto &fst_array = impl_->fst_array_; matcher_.resize(fst_array.size()); for (Label i = 0; i < fst_array.size(); ++i) { if (fst_array[i]) { matcher_[i].reset( new LocalMatcher(*fst_array[i], match_type_, kMultiEpsList)); auto it = impl_->nonterminal_set_.begin(); for (; it != impl_->nonterminal_set_.end(); ++it) { matcher_[i]->AddMultiEpsLabel(*it); } } } } ReplaceFstMatcher<Arc, StateTable, CacheStore> *Copy( bool safe = false) const override { return new ReplaceFstMatcher<Arc, StateTable, CacheStore>(*this, safe); } MatchType Type(bool test) const override { if (match_type_ == MATCH_NONE) return match_type_; const auto true_prop = match_type_ == MATCH_INPUT ? kILabelSorted : kOLabelSorted; const auto false_prop = match_type_ == MATCH_INPUT ? kNotILabelSorted : kNotOLabelSorted; const auto props = fst_.Properties(true_prop | false_prop, test); if (props & true_prop) { return match_type_; } else if (props & false_prop) { return MATCH_NONE; } else { return MATCH_UNKNOWN; } } const Fst<Arc> &GetFst() const override { return fst_; } uint64 Properties(uint64 props) const override { return props; } // Sets the state from which our matching happens. void SetState(StateId s) final { if (s_ == s) return; s_ = s; tuple_ = impl_->GetStateTable()->Tuple(s_); if (tuple_.fst_state == kNoStateId) { done_ = true; return; } // Gets current matcher, used for non-epsilon matching. 
current_matcher_ = matcher_[tuple_.fst_id].get(); current_matcher_->SetState(tuple_.fst_state); loop_.nextstate = s_; final_arc_ = false; } // Searches for label from previous set state. If label == 0, first // hallucinate an epsilon loop; otherwise use the underlying matcher to // search for the label or epsilons. Note since the ReplaceFst recursion // on non-terminal arcs causes epsilon transitions to be created we use // MultiEpsilonMatcher to search for possible matches of non-terminals. If the // component FST // reaches a final state we also need to add the exiting final arc. bool Find(Label label) final { bool found = false; label_ = label; if (label_ == 0 || label_ == kNoLabel) { // Computes loop directly, avoiding Replace::ComputeArc. if (label_ == 0) { current_loop_ = true; found = true; } // Searches for matching multi-epsilons. final_arc_ = impl_->ComputeFinalArc(tuple_, nullptr); found = current_matcher_->Find(kNoLabel) || final_arc_ || found; } else { // Searches on a sub machine directly using sub machine matcher. found = current_matcher_->Find(label_); } return found; } bool Done() const final { return !current_loop_ && !final_arc_ && current_matcher_->Done(); } const Arc &Value() const final { if (current_loop_) return loop_; if (final_arc_) { impl_->ComputeFinalArc(tuple_, &arc_); return arc_; } const auto &component_arc = current_matcher_->Value(); impl_->ComputeArc(tuple_, component_arc, &arc_); return arc_; } void Next() final { if (current_loop_) { current_loop_ = false; return; } if (final_arc_) { final_arc_ = false; return; } current_matcher_->Next(); } ssize_t Priority(StateId s) final { return fst_.NumArcs(s); } private: std::unique_ptr<const ReplaceFst<Arc, StateTable, CacheStore>> owned_fst_; const ReplaceFst<Arc, StateTable, CacheStore> &fst_; internal::ReplaceFstImpl<Arc, StateTable, CacheStore> *impl_; LocalMatcher *current_matcher_; std::vector<std::unique_ptr<LocalMatcher>> matcher_; StateId s_; // Current state. Label label_; // Current label. MatchType match_type_; // Supplied by caller. mutable bool done_; mutable bool current_loop_; // Current arc is the implicit loop. mutable bool final_arc_; // Current arc for exiting recursion. mutable StateTuple tuple_; // Tuple corresponding to state_. mutable Arc arc_; Arc loop_; ReplaceFstMatcher &operator=(const ReplaceFstMatcher &) = delete; }; template <class Arc, class StateTable, class CacheStore> inline void ReplaceFst<Arc, StateTable, CacheStore>::InitStateIterator( StateIteratorData<Arc> *data) const { data->base = new StateIterator<ReplaceFst<Arc, StateTable, CacheStore>>(*this); } using StdReplaceFst = ReplaceFst<StdArc>; // Recursively replaces arcs in the root FSTs with other FSTs. // This version writes the result of replacement to an output MutableFst. // // Replace supports replacement of arcs in one Fst with another FST. This // replacement is recursive. Replace takes an array of FST(s). One FST // represents the root (or topology) machine. The root FST refers to other FSTs // by recursively replacing arcs labeled as non-terminals with the matching // non-terminal FST. Currently Replace uses the output symbols of the arcs to // determine whether the arc is a non-terminal arc or not. A non-terminal can be // any label that is not a non-zero terminal label in the output alphabet. // // Note that input argument is a vector of pairs. These correspond to the tuple // of non-terminal Label and corresponding FST. 
template <class Arc> void Replace(const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &ifst_array, MutableFst<Arc> *ofst, ReplaceFstOptions<Arc> opts = ReplaceFstOptions<Arc>()) { opts.gc = true; opts.gc_limit = 0; // Caches only the last state for fastest copy. *ofst = ReplaceFst<Arc>(ifst_array, opts); } template <class Arc> void Replace(const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &ifst_array, MutableFst<Arc> *ofst, const ReplaceUtilOptions &opts) { Replace(ifst_array, ofst, ReplaceFstOptions<Arc>(opts)); } // For backwards compatibility. template <class Arc> void Replace(const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &ifst_array, MutableFst<Arc> *ofst, typename Arc::Label root, bool epsilon_on_replace) { Replace(ifst_array, ofst, ReplaceFstOptions<Arc>(root, epsilon_on_replace)); } template <class Arc> void Replace(const std::vector<std::pair<typename Arc::Label, const Fst<Arc> *>> &ifst_array, MutableFst<Arc> *ofst, typename Arc::Label root) { Replace(ifst_array, ofst, ReplaceFstOptions<Arc>(root)); } } // namespace fst #endif // FST_REPLACE_H_
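// A minimal usage sketch for the Replace() convenience function above. It is
// an illustration, not part of the upstream header: the non-terminal labels
// (1 and 2), the terminal label 3, and the two tiny placeholder FSTs are
// assumptions made up for this example.

#include <utility>
#include <vector>

#include <fst/fstlib.h>

void ReplaceUsageSketch() {
  using fst::StdArc;
  using fst::StdVectorFst;
  const StdArc::Label kRoot = 1;  // Hypothetical root non-terminal.
  const StdArc::Label kName = 2;  // Hypothetical non-terminal to expand.

  // Root topology: a single arc whose output label is the non-terminal kName.
  StdVectorFst root;
  root.AddState();
  root.AddState();
  root.SetStart(0);
  root.SetFinal(1, StdArc::Weight::One());
  root.AddArc(0, StdArc(kName, kName, StdArc::Weight::One(), 1));

  // Component FST substituted for every kName arc in the root.
  StdVectorFst name;
  name.AddState();
  name.AddState();
  name.SetStart(0);
  name.SetFinal(1, StdArc::Weight::One());
  name.AddArc(0, StdArc(3, 3, StdArc::Weight::One(), 1));

  // The input argument is a vector of (non-terminal label, FST) pairs.
  std::vector<std::pair<StdArc::Label, const fst::Fst<StdArc> *>> fst_array;
  fst_array.emplace_back(kRoot, &root);
  fst_array.emplace_back(kName, &name);

  StdVectorFst result;
  fst::Replace(fst_array, &result, fst::ReplaceFstOptions<StdArc>(kRoot));
}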
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/kenlm/GIT_REVISION
b9f35777d112ce2fc10bd3986302517a16dc3883
0
coqui_public_repos/STT
coqui_public_repos/STT/doc/Java-API.rst
Java ==== STTModel --------------- .. doxygenclass:: ai::coqui::libstt::STTModel :project: stt-java :members: Metadata -------- .. doxygenclass:: ai::coqui::libstt::Metadata :project: stt-java :members: getNumTranscripts, getTranscript CandidateTranscript ------------------- .. doxygenclass:: ai::coqui::libstt::CandidateTranscript :project: stt-java :members: getNumTokens, getConfidence, getToken TokenMetadata ------------- .. doxygenclass:: ai::coqui::libstt::TokenMetadata :project: stt-java :members: getText, getTimestep, getStartTime
0
coqui_public_repos/TTS/TTS/tts
coqui_public_repos/TTS/TTS/tts/configs/tacotron2_config.py
from dataclasses import dataclass from TTS.tts.configs.tacotron_config import TacotronConfig @dataclass class Tacotron2Config(TacotronConfig): """Defines parameters for Tacotron2 based models. Example: >>> from TTS.tts.configs.tacotron2_config import Tacotron2Config >>> config = Tacotron2Config() Check `TacotronConfig` for argument descriptions. """ model: str = "tacotron2" out_channels: int = 80 encoder_in_features: int = 512 decoder_in_features: int = 512
0
coqui_public_repos/STT-models/odia/itml
coqui_public_repos/STT-models/odia/itml/v0.1.0/LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. 
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
0
coqui_public_repos/STT-examples/web_microphone_websocket
coqui_public_repos/STT-examples/web_microphone_websocket/src/index.js
import React from 'react'; import ReactDOM from 'react-dom'; import './index.css'; import App from './App'; ReactDOM.render(<App />, document.getElementById('root'));
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/script/script-impl.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // This file defines the registration mechanism for new operations. // These operations are designed to enable scripts to work with FST classes // at a high level. // // If you have a new arc type and want these operations to work with FSTs // with that arc type, see below for the registration steps // you must take. // // These methods are only recommended for use in high-level scripting // applications. Most users should use the lower-level templated versions // corresponding to these. // // If you have a new arc type you'd like these operations to work with, // use the REGISTER_FST_OPERATIONS macro defined in fstscript.h. // // If you have a custom operation you'd like to define, you need four // components. In the following, assume you want to create a new operation // with the signature // // void Foo(const FstClass &ifst, MutableFstClass *ofst); // // You need: // // 1) A way to bundle the args that your new Foo operation will take, as // a single struct. The template structs in arg-packs.h provide a handy // way to do this. In Foo's case, that might look like this: // // using FooArgs = std::pair<const FstClass &, MutableFstClass *>; // // Note: this package of args is going to be passed by non-const pointer. // // 2) A function template that is able to perform Foo, given the args and // arc type. Yours might look like this: // // template<class Arc> // void Foo(FooArgs *args) { // // Pulls out the actual, arc-templated FSTs. // const Fst<Arc> &ifst = std::get<0>(*args).GetFst<Arc>(); // MutableFst<Arc> *ofst = std::get<1>(*args)->GetMutableFst<Arc>(); // // Actually perform Foo on ifst and ofst. // } // // 3) a client-facing function for your operation. This would look like // the following: // // void Foo(const FstClass &ifst, MutableFstClass *ofst) { // // Check that the arc types of the FSTs match // if (!ArcTypesMatch(ifst, *ofst, "Foo")) return; // // package the args // FooArgs args(ifst, ofst); // // Finally, call the operation // Apply<Operation<FooArgs>>("Foo", ifst->ArcType(), &args); // } // // The Apply<> function template takes care of the link between 2 and 3, // provided you also have: // // 4) A registration for your new operation, on the arc types you care about. // This can be provided easily by the REGISTER_FST_OPERATION macro in // operations.h: // // REGISTER_FST_OPERATION(Foo, StdArc, FooArgs); // REGISTER_FST_OPERATION(Foo, MyArc, FooArgs); // // .. etc // // // That's it! Now when you call Foo(const FstClass &, MutableFstClass *), // it dispatches (in #3) via the Apply<> function to the correct // instantiation of the template function in #2. // #ifndef FST_SCRIPT_SCRIPT_IMPL_H_ #define FST_SCRIPT_SCRIPT_IMPL_H_ // This file contains general-purpose templates which are used in the // implementation of the operations. #include <string> #include <utility> #include <fst/generic-register.h> #include <fst/script/fst-class.h> #include <fst/log.h> namespace fst { namespace script { enum RandArcSelection { UNIFORM_ARC_SELECTOR, LOG_PROB_ARC_SELECTOR, FAST_LOG_PROB_ARC_SELECTOR }; // A generic register for operations with various kinds of signatures. // Needed since every function signature requires a new registration class. // The std::pair<string, string> is understood to be the operation name and arc // type; subclasses (or typedefs) need only provide the operation signature. 
template <class OperationSignature> class GenericOperationRegister : public GenericRegister<std::pair<string, string>, OperationSignature, GenericOperationRegister<OperationSignature>> { public: void RegisterOperation(const string &operation_name, const string &arc_type, OperationSignature op) { this->SetEntry(std::make_pair(operation_name, arc_type), op); } OperationSignature GetOperation(const string &operation_name, const string &arc_type) { return this->GetEntry(std::make_pair(operation_name, arc_type)); } protected: string ConvertKeyToSoFilename( const std::pair<string, string> &key) const final { // Uses the old-style FST for now. string legal_type(key.second); // The arc type. ConvertToLegalCSymbol(&legal_type); return legal_type + "-arc.so"; } }; // Operation package: everything you need to register a new type of operation. // The ArgPack should be the type that's passed into each wrapped function; // for instance, it might be a struct containing all the args. It's always // passed by pointer, so const members should be used to enforce constness where // it's needed. Return values should be implemented as a member of ArgPack as // well. template <class Args> struct Operation { using ArgPack = Args; using OpType = void (*)(ArgPack *args); // The register (hash) type. using Register = GenericOperationRegister<OpType>; // The register-er type using Registerer = GenericRegisterer<Register>; }; // Macro for registering new types of operations. #define REGISTER_FST_OPERATION(Op, Arc, ArgPack) \ static fst::script::Operation<ArgPack>::Registerer \ arc_dispatched_operation_##ArgPack##Op##Arc##_registerer \ (std::make_pair(#Op, Arc::Type()), Op<Arc>) // Template function to apply an operation by name. template <class OpReg> void Apply(const string &op_name, const string &arc_type, typename OpReg::ArgPack *args) { const auto op = OpReg::Register::GetRegister()->GetOperation(op_name, arc_type); if (!op) { FSTERROR() << "No operation found for " << op_name << " on " << "arc type " << arc_type; return; } op(args); } namespace internal { // Helper that logs to ERROR if the arc types of m and n don't match, // assuming that both m and n implement .ArcType(). The op_name argument is // used to construct the error message. template <class M, class N> bool ArcTypesMatch(const M &m, const N &n, const string &op_name) { if (m.ArcType() != n.ArcType()) { FSTERROR() << "Arguments with non-matching arc types passed to " << op_name << ":\t" << m.ArcType() << " and " << n.ArcType(); return false; } return true; } // From untyped to typed weights. template <class Weight> void CopyWeights(const std::vector<WeightClass> &weights, std::vector<Weight> *typed_weights) { typed_weights->clear(); typed_weights->reserve(weights.size()); for (const auto &weight : weights) { typed_weights->push_back(*weight.GetWeight<Weight>()); } } // From typed to untyped weights. template <class Weight> void CopyWeights(const std::vector<Weight> &typed_weights, std::vector<WeightClass> *weights) { weights->clear(); weights->reserve(typed_weights.size()); for (const auto &typed_weight : typed_weights) { weights->emplace_back(typed_weight); } } } // namespace internal } // namespace script } // namespace fst #endif // FST_SCRIPT_SCRIPT_IMPL_H_
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/types/set.hpp
/*! \file set.hpp \brief Support for types found in \<set\> \ingroup STLSupport */ /* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CEREAL_TYPES_SET_HPP_ #define CEREAL_TYPES_SET_HPP_ #include "cereal/cereal.hpp" #include <set> namespace cereal { namespace set_detail { //! @internal template <class Archive, class SetT> inline void save( Archive & ar, SetT const & set ) { ar( make_size_tag( static_cast<size_type>(set.size()) ) ); for( const auto & i : set ) ar( i ); } //! @internal template <class Archive, class SetT> inline void load( Archive & ar, SetT & set ) { size_type size; ar( make_size_tag( size ) ); set.clear(); auto hint = set.begin(); for( size_type i = 0; i < size; ++i ) { typename SetT::key_type key; ar( key ); #ifdef CEREAL_OLDER_GCC hint = set.insert( hint, std::move( key ) ); #else // NOT CEREAL_OLDER_GCC hint = set.emplace_hint( hint, std::move( key ) ); #endif // NOT CEREAL_OLDER_GCC } } } //! Saving for std::set template <class Archive, class K, class C, class A> inline void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::set<K, C, A> const & set ) { set_detail::save( ar, set ); } //! Loading for std::set template <class Archive, class K, class C, class A> inline void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::set<K, C, A> & set ) { set_detail::load( ar, set ); } //! Saving for std::multiset template <class Archive, class K, class C, class A> inline void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::multiset<K, C, A> const & multiset ) { set_detail::save( ar, multiset ); } //! Loading for std::multiset template <class Archive, class K, class C, class A> inline void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::multiset<K, C, A> & multiset ) { set_detail::load( ar, multiset ); } } // namespace cereal #endif // CEREAL_TYPES_SET_HPP_
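// A short usage sketch for the std::set support above (illustrative only). It
// assumes cereal's JSON archives from "cereal/archives/json.hpp"; any other
// cereal archive type works the same way.

#include <set>
#include <sstream>
#include <string>

#include "cereal/archives/json.hpp"
#include "cereal/types/set.hpp"
#include "cereal/types/string.hpp"

inline std::string RoundTripSetSketch() {
  std::set<std::string> words{"fst", "stt", "tts"};

  std::ostringstream os;
  {
    cereal::JSONOutputArchive oar(os);  // Uses the save() overload above.
    oar(words);
  }  // The archive finishes writing when it goes out of scope.

  std::set<std::string> restored;
  std::istringstream is(os.str());
  {
    cereal::JSONInputArchive iar(is);  // Uses the load() overload above.
    iar(restored);
  }
  return os.str();  // JSON text containing the serialized set.
}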
0
coqui_public_repos/STT/native_client/kenlm/util
coqui_public_repos/STT/native_client/kenlm/util/double-conversion/fixed-dtoa.h
// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_ #define DOUBLE_CONVERSION_FIXED_DTOA_H_ #include "utils.h" namespace kenlm_double_conversion { // Produces digits necessary to print a given number with // 'fractional_count' digits after the decimal point. // The buffer must be big enough to hold the result plus one terminating null // character. // // The produced digits might be too short in which case the caller has to fill // the gaps with '0's. // Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and // decimal_point = -2. // Halfway cases are rounded towards +/-Infinity (away from 0). The call // FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0. // The returned buffer may contain digits that would be truncated from the // shortest representation of the input. // // This method only works for some parameters. If it can't handle the input it // returns false. The output is null-terminated when the function succeeds. bool FastFixedDtoa(double v, int fractional_count, Vector<char> buffer, int* length, int* decimal_point); } // namespace kenlm_double_conversion #endif // DOUBLE_CONVERSION_FIXED_DTOA_H_
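// A small usage sketch for FastFixedDtoa() (illustrative only). The 128-byte
// buffer size is an assumption chosen to be comfortably large rather than a
// constant exported by this header.

#include <cstdio>

#include "fixed-dtoa.h"
#include "utils.h"

inline void FixedDtoaSketch() {
  using kenlm_double_conversion::FastFixedDtoa;
  using kenlm_double_conversion::Vector;

  char storage[128];
  Vector<char> buffer(storage, static_cast<int>(sizeof(storage)));
  int length = 0;
  int decimal_point = 0;

  // The documented example above: 0.15 printed with 2 fractional digits rounds
  // away from zero, producing digits "2" with decimal_point == 0, i.e. "0.20"
  // once the caller pads with zeros.
  if (FastFixedDtoa(0.15, 2, buffer, &length, &decimal_point)) {
    std::printf("digits=%s length=%d decimal_point=%d\n", buffer.start(),
                length, decimal_point);
  }
}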
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstepsnormalize-main.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Epsilon-normalizes an FST. #include <cstring> #include <memory> #include <string> #include <fst/flags.h> #include <fst/script/epsnormalize.h> #include <fst/script/getters.h> DECLARE_bool(eps_norm_output); int fstepsnormalize_main(int argc, char **argv) { namespace s = fst::script; using fst::script::FstClass; using fst::script::VectorFstClass; string usage = "Epsilon normalizes an FST.\n\n Usage: "; usage += argv[0]; usage += " [in.fst [out.fst]]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 3) { ShowUsage(); return 1; } const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : ""; const string out_name = argc > 2 ? argv[2] : ""; std::unique_ptr<FstClass> ifst(FstClass::Read(in_name)); if (!ifst) return 1; VectorFstClass ofst(ifst->ArcType()); s::EpsNormalize(*ifst, &ofst, s::GetEpsNormalizeType(FLAGS_eps_norm_output)); return !ofst.Write(out_name); }
0
coqui_public_repos/STT-models/estonian/itml
coqui_public_repos/STT-models/estonian/itml/v0.1.0/MODEL_CARD.md
# Model card for Estonian STT

Jump to section:

- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)

## Model details

- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Estonian / Eesti / `et`
- Model date: April 9, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.0`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{estonian-stt, author = {Tyers,Francis}, title = {Estonian STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-ET-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).

## Intended use

Speech-to-Text for the [Estonian Language](https://en.wikipedia.org/wiki/Estonian_language) on 16kHz, mono-channel audio.

## Performance Factors

Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

## Metrics

STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.

#### Transcription Accuracy

The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/cv/).

|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|92.2\%|29.5\%|

#### Real-Time Factor

Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. For example, if it takes 5 seconds to transcribe 10 seconds of audio, the RTF is 0.5. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.

Recorded average RTF on laptop CPU: ``

#### Model Size

`model.pbmm`: 181M

`model.tflite`: 46M

### Approaches to uncertainty and variability

Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.

## Training data

This model was trained on Common Voice 6.1 train.

## Evaluation data

The model was evaluated on Common Voice 6.1 test.

## Ethical considerations

Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.

### Demographic Bias

You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.

### Surveillance

Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.

## Caveats and recommendations

Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-electronjs_v9.0_16k-linux-amd64-opt.yml
build:
  template_file: test-linux-opt-base.tyml
  docker_image: "ubuntu:16.04"
  dependencies:
    - "linux-amd64-cpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup: >
    ${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
    && apt-get -qq update && apt-get -qq -y install
    ${nodejs.packages_xenial.apt} ${electronjs.packages_xenial.apt}
  args:
    tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 9.0.1 16k"
  workerType: "${docker.dsTests}"
  metadata:
    name: "DeepSpeech Linux AMD64 CPU ElectronJS v9.0 tests (16kHz)"
    description: "Testing DeepSpeech for Linux/AMD64 on ElectronJS v9.0, CPU only, optimized version (16kHz)"
0
coqui_public_repos
coqui_public_repos/open-bible-scripts/run-biblica-splits-asante-twi.sh
#!/bin/bash

LANGUAGE_NAME="asante-twi"

mkdir $LANGUAGE_NAME
cd $LANGUAGE_NAME

#parallel --eta -a ../data/$LANGUAGE_NAME.txt "wget {}"
#unzip twiONA20_SFM.zip
#unzip twiONA20_timingfiles.zip

for i in *_wav.zip;do
    DIR=$(echo $i|cut -d'_' -f2);
    # mkdir -p $DIR
    # unzip $i -d $DIR
    python ../split_verse_asante-twi.py --path_to_wavs "$DIR/$DIR/" --path_to_timings "timingfiles/$DIR/" --path_to_book_sfm twiONA20_SFM/*"${DIR}"*.SFM --output "$DIR/"
done
0
coqui_public_repos
coqui_public_repos/Trainer/CONTRIBUTING.md
# Contribution guidelines

Welcome to the 👟!

This repository is governed by [the Contributor Covenant Code of Conduct](https://github.com/coqui-ai/Trainer/blob/main/CODE_OF_CONDUCT.md).

## Where to start

We welcome everyone who likes to contribute to 👟.

You can contribute not only with code but with bug reports, comments, questions, answers, or just a simple tweet to spread the word.

If you'd like to contribute code and squash a bug but don't know where to start, here are some pointers.

- [Github Issues Tracker](https://github.com/coqui-ai/Trainer/issues)

    This is the place to find feature requests and bug reports. Issues with the ```good first issue``` tag are a good place for beginners to take on.

- ✨**PR**✨ [pages](https://github.com/coqui-ai/Trainer/pulls) with the ```🚀new version``` tag.

    We list all the target improvements for the next version. You can pick one of them and start contributing.

- Also feel free to suggest new features. We're always open to new things.

## Sending a ✨**PR**✨

If you have a new feature or a bug to squash, go ahead and send a ✨**PR**✨.
Please use the following steps for a ✨**PR**✨. Let us know if you encounter a problem along the way.

The following steps are tested on an Ubuntu system.

1. Fork 👟[https://github.com/coqui-ai/Trainer] by clicking the fork button at the top right corner of the project page.

2. Clone 👟 and add the main repo as a new remote named ```upstream```.

    ```bash
    $ git clone git@github.com:<your Github name>/Trainer.git
    $ cd Trainer
    $ git remote add upstream https://github.com/coqui-ai/Trainer.git
    ```

3. Install 👟 for development.

    ```bash
    $ make install
    ```

4. Create a new branch with an informative name for your goal.

    ```bash
    $ git checkout -b an_informative_name_for_my_branch
    ```

5. Implement your changes on your new branch.

6. Explain your code using [Google Style](https://google.github.io/styleguide/pyguide.html#381-docstrings) docstrings.

7. Add your tests to our test suite under the ```tests``` folder. It is important to show that your code works, that edge cases are considered, and to inform others about the intended use.

8. Run the tests to see how your updates work with the rest of the project. You can repeat this step multiple times as you implement your changes to make sure you are heading in the right direction.

    ```bash
    $ make test      # stop at the first error
    $ make test_all  # run all the tests, report all the errors
    ```

9. Format your code. We use ```black``` for code and ```isort``` for ```import``` formatting.

    ```bash
    $ make style
    ```

10. Run the linter and correct the issues raised. We use ```pylint``` for linting. It helps to enforce a coding standard and offers simple refactoring suggestions.

    ```bash
    $ make lint
    ```

11. When things are good, add the new files and commit your changes.

    ```bash
    $ git add my_file1.py my_file2.py ...
    $ git commit
    ```

    It's good practice to regularly sync your local copy of the project with the upstream code to keep up with recent updates.

    ```bash
    $ git fetch upstream
    $ git rebase upstream/master
    # or for the development version
    $ git rebase upstream/dev
    ```

12. Send a PR to the ```dev``` branch. Push your branch to your fork.

    ```bash
    $ git push -u origin an_informative_name_for_my_branch
    ```

    Then go to your fork's Github page and click on 'Pull request' to send your ✨**PR**✨.

    Please set the ✨**PR**✨'s target branch to ```dev``` as we use ```dev``` to work on the next version.

13. Let's discuss until it is perfect. 💪

    We might ask you for certain changes that would appear on the ✨**PR**✨'s page under 👟[https://github.com/coqui-ai/Trainer/pulls].

14. Once things look perfect, we merge it to the ```dev``` branch and make it ready for the next version.

Feel free to ping us at any step if you need help, using our communication channels.

If you are new to Github or open-source contribution, these are good resources.

- [Github Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/proposing-changes-to-your-work-with-pull-requests)
- [First-Contribution](https://github.com/firstcontributions/first-contributions)
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/tf_linux-arm64-cpu-dbg.yml
build:
  template_file: generic_tc_caching-linux-opt-base.tyml
  cache:
    artifact_url: ${system.tensorflow_dbg.linux_arm64.url}
    artifact_namespace: ${system.tensorflow_dbg.linux_arm64.namespace}
  system_config: >
    ${tensorflow.packages_xenial.apt} && ${java.packages_xenial.apt}
  scripts:
    setup: "taskcluster/tf_tc-setup.sh"
    build: "taskcluster/tf_tc-build.sh --linux-arm64 dbg"
    package: "taskcluster/tf_tc-package.sh"
  maxRunTime: 14400
  workerType: "${docker.tfBuild}"
  metadata:
    name: "TensorFlow Linux ARM64 Cortex-A53 CPU debug"
    description: "Building TensorFlow for Linux ARM64 Cortex-A53, CPU only, debug version"
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/python/ios.pxd
# See www.openfst.org for extensive documentation on this weighted
# finite-state transducer library.

from libcpp.string cimport string

from basictypes cimport int8
from basictypes cimport int16
from basictypes cimport int32
from basictypes cimport int64
from basictypes cimport uint8
from basictypes cimport uint16
from basictypes cimport uint32
from basictypes cimport uint64


cdef extern from "<iostream>" namespace "std" nogil:

  cdef cppclass iostream:
    pass

  cdef cppclass istream(iostream):
    pass

  cdef cppclass ostream(iostream):
    pass


# We are ignoring openmodes for the moment.
cdef extern from "<fstream>" namespace "std" nogil:

  cdef cppclass ifstream(istream):
    ifstream(const string &)

  cdef cppclass ofstream(ostream):
    ofstream(const string &)


cdef extern from "<sstream>" namespace "std" nogil:

  cdef cppclass stringstream(istream, ostream):
    stringstream()
    string str()

    stringstream &operator<<(const string &)
    stringstream &operator<<(bool)
    # We define these in terms of the Google basictypes.
    stringstream &operator<<(int8)
    stringstream &operator<<(uint8)
    stringstream &operator<<(int16)
    stringstream &operator<<(uint16)
    stringstream &operator<<(int32)
    stringstream &operator<<(uint32)
    stringstream &operator<<(int64)
    stringstream &operator<<(uint64)
    stringstream &operator<<(double)
    stringstream &operator<<(long double)
0
coqui_public_repos/TTS/recipes/kokoro
coqui_public_repos/TTS/recipes/kokoro/tacotron2-DDC/run.sh
#!/bin/bash
# take the script's parent directory to prefix all the output paths.
RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
CORPUS=kokoro-speech-v1_1-small
echo $RUN_DIR
if [ \! -d $RUN_DIR/$CORPUS ] ; then
    echo "$RUN_DIR/$CORPUS doesn't exist."
    echo "Follow the instruction of https://github.com/kaiidams/Kokoro-Speech-Dataset to make the corpus."
    exit 1
fi
# create train-val splits
shuf $RUN_DIR/$CORPUS/metadata.csv > $RUN_DIR/$CORPUS/metadata_shuf.csv
head -n 8000 $RUN_DIR/$CORPUS/metadata_shuf.csv > $RUN_DIR/$CORPUS/metadata_train.csv
tail -n 812 $RUN_DIR/$CORPUS/metadata_shuf.csv > $RUN_DIR/$CORPUS/metadata_val.csv
# compute dataset mean and variance for normalization
python TTS/bin/compute_statistics.py $RUN_DIR/tacotron2-DDC.json $RUN_DIR/scale_stats.npy --data_path $RUN_DIR/$CORPUS/wavs/
# training ....
# change the GPU id if needed
CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tts.py --config_path $RUN_DIR/tacotron2-DDC.json \
    --coqpit.output_path $RUN_DIR \
    --coqpit.datasets.0.path $RUN_DIR/$CORPUS \
    --coqpit.audio.stats_path $RUN_DIR/scale_stats.npy \
    --coqpit.phoneme_cache_path $RUN_DIR/phoneme_cache \
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/pdt/CMakeLists.txt
file(GLOB HEADER_FILES ../../include/fst/extensions/pdt/*.h)
message(STATUS "${HEADER_FILES}")

if(HAVE_SCRIPT)
  add_library(fstpdtscript
    getters.cc
    pdtscript.cc
    ${HEADER_FILES})

  target_link_libraries(fstpdtscript fstscript fst)

  set_target_properties(fstpdtscript PROPERTIES
    SOVERSION "${SOVERSION}"
    FOLDER pdt
  )

  install(TARGETS fstpdtscript
    LIBRARY DESTINATION lib
    ARCHIVE DESTINATION lib
    RUNTIME DESTINATION lib
  )
endif(HAVE_SCRIPT)

if(HAVE_BIN)
  function (add_executable2 _name)
    add_executable(${ARGV})
    if (TARGET ${_name})
      target_link_libraries(${_name} fstpdtscript fstscript fst ${CMAKE_DL_LIBS})
      set_target_properties(${_name} PROPERTIES
        FOLDER pdt/bin
      )
    endif()
    install(TARGETS ${_name} RUNTIME DESTINATION bin)
  endfunction()

  add_executable2(pdtcompose pdtcompose.cc)
  add_executable2(pdtexpand pdtexpand.cc)
  add_executable2(pdtinfo pdtinfo.cc)
  add_executable2(pdtreplace pdtreplace.cc)
  add_executable2(pdtreverse pdtreverse.cc)
  add_executable2(pdtshortestpath pdtshortestpath.cc)
endif(HAVE_BIN)
0
coqui_public_repos/STT-examples/web_microphone_websocket
coqui_public_repos/STT-examples/web_microphone_websocket/public/index.html
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <link rel="icon" href="%PUBLIC_URL%/favicon.ico" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="theme-color" content="#000000" /> <meta name="description" content="Web site created using create-react-app" /> <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" /> <!-- manifest.json provides metadata used when your web app is installed on a user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/ --> <link rel="manifest" href="%PUBLIC_URL%/manifest.json" /> <!-- Notice the use of %PUBLIC_URL% in the tags above. It will be replaced with the URL of the `public` folder during the build. Only files inside the `public` folder can be referenced from the HTML. Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will work correctly both with client-side routing and a non-root public URL. Learn how to configure a non-root public URL by running `npm run build`. --> <title>STT - Web Microphone Websocket Example</title> </head> <body> <noscript>You need to enable JavaScript to run this app.</noscript> <div id="root"></div> <!-- This HTML file is a template. If you open it directly in the browser, you will see an empty page. You can add webfonts, meta tags, or analytics to this file. The build step will place the bundled scripts into the <body> tag. To begin the development, run `npm start` or `yarn start`. To create a production bundle, use `npm run build` or `yarn build`. --> </body> </html>
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers/dnnl/dnnl_provider_factory.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "onnxruntime_c_api.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * \param use_arena zero: false. non-zero: true.
 */
ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_Dnnl, _In_ OrtSessionOptions* options, int use_arena);

#ifdef __cplusplus
}
#endif
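A hedged sketch of how this factory function is typically paired with the ONNX Runtime C API when called from C++; the status handling, the choice of `use_arena = 1`, and the placement inside `main` are illustrative assumptions rather than requirements of this header.

```cpp
// Hedged sketch: enable the DNNL execution provider on a session-options
// object before creating an OrtSession. Error handling is reduced to an
// abort-on-error helper for brevity.
#include <cstdlib>

#include "onnxruntime_c_api.h"
#include "dnnl_provider_factory.h"

static void Check(const OrtApi* api, OrtStatus* status) {
  if (status != nullptr) {
    api->ReleaseStatus(status);
    std::abort();
  }
}

int main() {
  const OrtApi* api = OrtGetApiBase()->GetApi(ORT_API_VERSION);
  OrtSessionOptions* options = nullptr;
  Check(api, api->CreateSessionOptions(&options));
  // Non-zero use_arena enables the arena allocator, per the comment above.
  Check(api, OrtSessionOptionsAppendExecutionProvider_Dnnl(options, 1));
  // ... create the OrtSession with these options, then release everything.
  api->ReleaseSessionOptions(options);
  return 0;
}
```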
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/reweight.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Function to reweight an FST. #ifndef FST_REWEIGHT_H_ #define FST_REWEIGHT_H_ #include <vector> #include <fst/log.h> #include <fst/mutable-fst.h> namespace fst { enum ReweightType { REWEIGHT_TO_INITIAL, REWEIGHT_TO_FINAL }; // Reweights an FST according to a vector of potentials in a given direction. // The weight must be left distributive when reweighting towards the initial // state and right distributive when reweighting towards the final states. // // An arc of weight w, with an origin state of potential p and destination state // of potential q, is reweighted by p^-1 \otimes (w \otimes q) when reweighting // torwards the initial state, and by (p \otimes w) \otimes q^-1 when // reweighting towards the final states. template <class Arc> void Reweight(MutableFst<Arc> *fst, const std::vector<typename Arc::Weight> &potential, ReweightType type) { using Weight = typename Arc::Weight; if (fst->NumStates() == 0) return; // TODO(kbg): Make this a compile-time static_assert once we have a pleasant // way to "deregister" this operation for non-distributive semirings so an // informative error message is produced. if (type == REWEIGHT_TO_FINAL && !(Weight::Properties() & kRightSemiring)) { FSTERROR() << "Reweight: Reweighting to the final states requires " << "Weight to be right distributive: " << Weight::Type(); fst->SetProperties(kError, kError); return; } // TODO(kbg): Make this a compile-time static_assert once we have a pleasant // way to "deregister" this operation for non-distributive semirings so an // informative error message is produced. if (type == REWEIGHT_TO_INITIAL && !(Weight::Properties() & kLeftSemiring)) { FSTERROR() << "Reweight: Reweighting to the initial state requires " << "Weight to be left distributive: " << Weight::Type(); fst->SetProperties(kError, kError); return; } StateIterator<MutableFst<Arc>> siter(*fst); for (; !siter.Done(); siter.Next()) { const auto s = siter.Value(); if (s == potential.size()) break; const auto &weight = potential[s]; if (weight != Weight::Zero()) { for (MutableArcIterator<MutableFst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); if (arc.nextstate >= potential.size()) continue; const auto &nextweight = potential[arc.nextstate]; if (nextweight == Weight::Zero()) continue; if (type == REWEIGHT_TO_INITIAL) { arc.weight = Divide(Times(arc.weight, nextweight), weight, DIVIDE_LEFT); } if (type == REWEIGHT_TO_FINAL) { arc.weight = Divide(Times(weight, arc.weight), nextweight, DIVIDE_RIGHT); } aiter.SetValue(arc); } if (type == REWEIGHT_TO_INITIAL) { fst->SetFinal(s, Divide(fst->Final(s), weight, DIVIDE_LEFT)); } } if (type == REWEIGHT_TO_FINAL) { fst->SetFinal(s, Times(weight, fst->Final(s))); } } // This handles elements past the end of the potentials array. for (; !siter.Done(); siter.Next()) { const auto s = siter.Value(); if (type == REWEIGHT_TO_FINAL) { fst->SetFinal(s, Times(Weight::Zero(), fst->Final(s))); } } const auto startweight = fst->Start() < potential.size() ? 
potential[fst->Start()] : Weight::Zero(); if ((startweight != Weight::One()) && (startweight != Weight::Zero())) { if (fst->Properties(kInitialAcyclic, true) & kInitialAcyclic) { const auto s = fst->Start(); for (MutableArcIterator<MutableFst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); if (type == REWEIGHT_TO_INITIAL) { arc.weight = Times(startweight, arc.weight); } else { arc.weight = Times(Divide(Weight::One(), startweight, DIVIDE_RIGHT), arc.weight); } aiter.SetValue(arc); } if (type == REWEIGHT_TO_INITIAL) { fst->SetFinal(s, Times(startweight, fst->Final(s))); } else { fst->SetFinal(s, Times(Divide(Weight::One(), startweight, DIVIDE_RIGHT), fst->Final(s))); } } else { const auto s = fst->AddState(); const auto weight = (type == REWEIGHT_TO_INITIAL) ? startweight : Divide(Weight::One(), startweight, DIVIDE_RIGHT); fst->AddArc(s, Arc(0, 0, weight, fst->Start())); fst->SetStart(s); } } fst->SetProperties(ReweightProperties(fst->Properties(kFstProperties, false)), kFstProperties); } } // namespace fst #endif // FST_REWEIGHT_H_
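A minimal sketch of the most common use of `Reweight`: pushing weights toward the initial state, with the potentials taken as reverse shortest distances (the same pattern weight pushing uses). The two-state FST built here is purely illustrative.

```cpp
// Hedged sketch: reweight a small tropical-semiring FST toward its initial
// state using distances to the final states as potentials.
#include <vector>

#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst f;
  f.AddState();  // state 0
  f.AddState();  // state 1
  f.SetStart(0);
  f.SetFinal(1, fst::TropicalWeight(0.5));
  f.AddArc(0, fst::StdArc(1, 1, fst::TropicalWeight(2.0), 1));

  // Shortest distance from every state to the final states (reverse = true).
  std::vector<fst::TropicalWeight> potential;
  fst::ShortestDistance(f, &potential, /*reverse=*/true);

  // Apply the reweighting described above.
  fst::Reweight(&f, potential, fst::REWEIGHT_TO_INITIAL);
  return 0;
}
```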
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/kenlm/MANIFEST.in
# file GENERATED by distutils, do NOT edit
include setup.py
include lm/*.cc
include lm/*.hh
include python/*.cpp
include util/*.cc
include util/*.hh
include util/double-conversion/*.cc
include util/double-conversion/*.h
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-cpp_16k_bytes-darwin-amd64-opt.yml
build:
  template_file: test-darwin-opt-base.tyml
  dependencies:
    - "darwin-amd64-cpu-opt"
    - "test-training-extra_16k-linux-amd64-py36m-opt"
    - "homebrew_tests-darwin-amd64"
  test_model_task: "test-training-extra_16k-linux-amd64-py36m-opt"
  args:
    tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-cpp-bytes-ds-tests.sh 16k"
  metadata:
    name: "DeepSpeech OSX AMD64 CPU C++ tests (Bytes Output Model, 16kHz)"
    description: "Testing DeepSpeech C++ for OSX/AMD64, CPU only, optimized version (Bytes Output Model, 16kHz)"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/script/compile.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_COMPILE_H_ #define FST_SCRIPT_COMPILE_H_ #include <istream> #include <memory> #include <fst/script/arg-packs.h> #include <fst/script/compile-impl.h> #include <fst/script/fst-class.h> namespace fst { namespace script { // This operation exists in two forms. 1 is a void operation which writes the // compiled machine to disk; 2 returns an FstClass. I/O should normally be done // using the binary format for efficiency, so users are STRONGLY ENCOURAGED to // use 1 or to construct FSTs using the C++ FST mutation operations. // Note: it is safe to pass these strings as references because // this struct is only used to pass them deeper in the call graph. // Be sure you understand why this is so before using this struct // for anything else! struct CompileFstInnerArgs { std::istream &istrm; const string &source; const string &fst_type; const fst::SymbolTable *isyms; const fst::SymbolTable *osyms; const fst::SymbolTable *ssyms; const bool accep; const bool ikeep; const bool okeep; const bool nkeep; const bool allow_negative_labels; CompileFstInnerArgs(std::istream &istrm, const string &source, const string &fst_type, const fst::SymbolTable *isyms, const fst::SymbolTable *osyms, const fst::SymbolTable *ssyms, bool accep, bool ikeep, bool okeep, bool nkeep, bool allow_negative_labels = false) : istrm(istrm), source(source), fst_type(fst_type), isyms(isyms), osyms(osyms), ssyms(ssyms), accep(accep), ikeep(ikeep), okeep(okeep), nkeep(nkeep), allow_negative_labels(allow_negative_labels) {} }; using CompileFstArgs = WithReturnValue<FstClass *, CompileFstInnerArgs>; template <class Arc> void CompileFstInternal(CompileFstArgs *args) { using fst::Convert; using fst::Fst; using fst::FstCompiler; FstCompiler<Arc> fstcompiler( args->args.istrm, args->args.source, args->args.isyms, args->args.osyms, args->args.ssyms, args->args.accep, args->args.ikeep, args->args.okeep, args->args.nkeep, args->args.allow_negative_labels); const Fst<Arc> *fst = &fstcompiler.Fst(); std::unique_ptr<const Fst<Arc>> owned_fst; if (args->args.fst_type != "vector") { owned_fst.reset(Convert<Arc>(*fst, args->args.fst_type)); if (!owned_fst) { FSTERROR() << "Failed to convert FST to desired type: " << args->args.fst_type; } fst = owned_fst.get(); } args->retval = fst ? new FstClass(*fst) : nullptr; } void CompileFst(std::istream &istrm, const string &source, const string &dest, const string &fst_type, const string &arc_type, const SymbolTable *isyms, const SymbolTable *osyms, const SymbolTable *ssyms, bool accep, bool ikeep, bool okeep, bool nkeep, bool allow_negative_labels); FstClass *CompileFstInternal(std::istream &istrm, const string &source, const string &fst_type, const string &arc_type, const SymbolTable *isyms, const SymbolTable *osyms, const SymbolTable *ssyms, bool accep, bool ikeep, bool okeep, bool nkeep, bool allow_negative_labels); } // namespace script } // namespace fst #endif // FST_SCRIPT_COMPILE_H_
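A minimal sketch of driving the script-level `CompileFst` declared above from an in-memory text description; the acceptor text, the output path, the `vector`/`standard` type names, and the omission of symbol tables (numeric labels only) are all illustrative choices.

```cpp
// Hedged sketch: compile a two-state acceptor from AT&T text format and
// write it to disk as a StdArc VectorFst. Names and flags are placeholders.
#include <sstream>

#include <fst/script/compile.h>

int main() {
  // "src dst label weight" arc line, then a final-state line.
  std::istringstream strm("0 1 1 0.5\n1\n");
  fst::script::CompileFst(strm, "<string>", "compiled.fst",
                          /*fst_type=*/"vector", /*arc_type=*/"standard",
                          /*isyms=*/nullptr, /*osyms=*/nullptr,
                          /*ssyms=*/nullptr, /*accep=*/true,
                          /*ikeep=*/false, /*okeep=*/false, /*nkeep=*/false,
                          /*allow_negative_labels=*/false);
  return 0;
}
```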
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstinvert.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

int fstinvert_main(int argc, char **argv);

int main(int argc, char **argv) { return fstinvert_main(argc, argv); }
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text/dictionary/Dictionary.cpp
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT-style license found in the * LICENSE file in the root directory of this source tree. */ #include <iostream> #include <stdexcept> #include "flashlight/lib/common/String.h" #include "flashlight/lib/common/System.h" #include "flashlight/lib/text/dictionary/Dictionary.h" #include "flashlight/lib/text/dictionary/Utils.h" namespace fl { namespace lib { namespace text { Dictionary::Dictionary(std::istream& stream) { createFromStream(stream); } Dictionary::Dictionary(const std::string& filename) { std::ifstream stream = createInputStream(filename); createFromStream(stream); } void Dictionary::createFromStream(std::istream& stream) { if (!stream) { throw std::runtime_error("Unable to open dictionary input stream."); } std::string line; while (std::getline(stream, line)) { if (line.empty()) { continue; } auto tkns = splitOnWhitespace(line, true); auto idx = idx2entry_.size(); // All entries on the same line map to the same index for (const auto& tkn : tkns) { addEntry(tkn, idx); } } if (!isContiguous()) { throw std::runtime_error("Invalid dictionary format - not contiguous"); } } void Dictionary::addEntry(const std::string& entry, int idx) { if (entry2idx_.find(entry) != entry2idx_.end()) { throw std::invalid_argument( "Duplicate entry name in dictionary '" + entry + "'"); } entry2idx_[entry] = idx; if (idx2entry_.find(idx) == idx2entry_.end()) { idx2entry_[idx] = entry; } } void Dictionary::addEntry(const std::string& entry) { // Check if the entry already exists in the dictionary if (entry2idx_.find(entry) != entry2idx_.end()) { throw std::invalid_argument( "Duplicate entry in dictionary '" + entry + "'"); } int idx = idx2entry_.size(); // Find first available index. 
while (idx2entry_.find(idx) != idx2entry_.end()) { ++idx; } addEntry(entry, idx); } std::string Dictionary::getEntry(int idx) const { auto iter = idx2entry_.find(idx); if (iter == idx2entry_.end()) { throw std::invalid_argument( "Unknown index in dictionary '" + std::to_string(idx) + "'"); } return iter->second; } void Dictionary::setDefaultIndex(int idx) { defaultIndex_ = idx; } int Dictionary::getIndex(const std::string& entry) const { auto iter = entry2idx_.find(entry); if (iter == entry2idx_.end()) { if (defaultIndex_ < 0) { throw std::invalid_argument( "Unknown entry in dictionary: '" + entry + "'"); } else { return defaultIndex_; } } return iter->second; } bool Dictionary::contains(const std::string& entry) const { auto iter = entry2idx_.find(entry); if (iter == entry2idx_.end()) { return false; } return true; } size_t Dictionary::entrySize() const { return entry2idx_.size(); } bool Dictionary::isContiguous() const { for (size_t i = 0; i < indexSize(); ++i) { if (idx2entry_.find(i) == idx2entry_.end()) { return false; } } for (const auto& tknidx : entry2idx_) { if (idx2entry_.find(tknidx.second) == idx2entry_.end()) { return false; } } return true; } std::vector<int> Dictionary::mapEntriesToIndices( const std::vector<std::string>& entries) const { std::vector<int> indices; indices.reserve(entries.size()); for (const auto& tkn : entries) { indices.emplace_back(getIndex(tkn)); } return indices; } std::vector<std::string> Dictionary::mapIndicesToEntries( const std::vector<int>& indices) const { std::vector<std::string> entries; entries.reserve(indices.size()); for (const auto& idx : indices) { entries.emplace_back(getEntry(idx)); } return entries; } size_t Dictionary::indexSize() const { return idx2entry_.size(); } } // namespace text } // namespace lib } // namespace fl
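A short usage sketch for the class implemented above: build a `Dictionary` from an in-memory stream, register a fallback index, and map entries to indices. The token list and the `<unk>` fallback are illustrative.

```cpp
// Hedged sketch: one entry per line; entries sharing a line would share an
// index. Unknown entries fall back to the index registered for "<unk>".
#include <iostream>
#include <sstream>

#include "flashlight/lib/text/dictionary/Dictionary.h"

int main() {
  std::istringstream stream("<unk>\nhello\nworld\n");
  fl::lib::text::Dictionary dict(stream);
  dict.setDefaultIndex(dict.getIndex("<unk>"));

  for (int idx : dict.mapEntriesToIndices({"hello", "unseen-token"})) {
    std::cout << idx << "\n";  // prints 1, then the <unk> index 0
  }
  return 0;
}
```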
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/lib/libfst.vcxproj.filters
<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ItemGroup> <Filter Include="Source Files"> <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier> <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions> </Filter> <Filter Include="Header Files"> <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier> <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions> </Filter> </ItemGroup> <ItemGroup> <ClCompile Include="compat.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="flags.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="fst.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="properties.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="symbol-table.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="symbol-table-ops.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="util.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="weight.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="mapped-file.cc"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="fst-types.cc"> <Filter>Source Files</Filter> </ClCompile> </ItemGroup> <ItemGroup> <ClInclude Include="..\include\fst\accumulator.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\add-on.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\arc.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\arc-arena.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\arcfilter.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\arc-map.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\arcsort.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\bi-table.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\cache.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\closure.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\compact-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\compat.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\complement.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\compose.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\compose-filter.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\concat.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\config.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\connect.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\const-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\determinize.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\dfs-visit.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\difference.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\disambiguate.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\edit-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\encode.h"> 
<Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\epsnormalize.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\equal.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\equivalent.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\expanded-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\expectation-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\factor-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\filter-state.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\flags.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\float-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\fst-decl.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\fstlib.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\generic-register.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\heap.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\icu.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\intersect.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\interval-set.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\invert.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\isomorphic.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\label-reachable.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\lexicographic-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\lock.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\log.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\lookahead-filter.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\lookahead-matcher.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\map.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\mapped-file.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\matcher.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\matcher-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\memory.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\minimize.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\mutable-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\pair-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\partition.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\power-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\product-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\project.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\properties.h"> <Filter>Header Files</Filter> 
</ClInclude> <ClInclude Include="..\include\fst\prune.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\push.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\queue.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\randequivalent.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\randgen.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\rational.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\register.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\relabel.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\replace.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\replace-util.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\reverse.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\reweight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\rmepsilon.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\rmfinalepsilon.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\shortest-distance.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\shortest-path.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\signed-log-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\sparse-power-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\sparse-tuple-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\state-map.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\state-reachable.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\statesort.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\state-table.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\string.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\string-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\symbol-table.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\symbol-table-ops.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\synchronize.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\test-properties.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\topsort.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\tuple-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\types.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\union.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\union-find.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\union-weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\util.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\vector-fst.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\verify.h"> <Filter>Header Files</Filter> </ClInclude> 
<ClInclude Include="..\include\fst\visit.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\weight.h"> <Filter>Header Files</Filter> </ClInclude> <ClInclude Include="..\include\fst\set-weight.h"> <Filter>Header Files</Filter> </ClInclude> </ItemGroup> </Project>
0