repo_id: string (length 18-103)
file_path: string (length 30-136)
content: string (length 2-3.36M)
__index_level_0__: int64 (values 0-0)
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstinfo.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/flags.h>

DEFINE_string(arc_filter, "any",
              "Arc filter: one of:"
              " \"any\", \"epsilon\", \"iepsilon\", \"oepsilon\"; "
              "this only affects the counts of (co)accessible states, "
              "connected states, and (strongly) connected components");
DEFINE_string(info_type, "auto",
              "Info format: one of: \"auto\", \"long\", \"short\"");
DEFINE_bool(pipe, false, "Send info to stderr, input to stdout");
DEFINE_bool(test_properties, true,
            "Compute property values (if unknown to FST)");
DEFINE_bool(fst_verify, true, "Verify FST sanity");

int fstinfo_main(int argc, char **argv);

int main(int argc, char **argv) { return fstinfo_main(argc, argv); }
0
coqui_public_repos/STT-models/romanian/itml
coqui_public_repos/STT-models/romanian/itml/v0.1.0/alphabet.txt
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
â
î
ă
ș
ț
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/isomorphic.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/script/fst-class.h>
#include <fst/script/isomorphic.h>
#include <fst/script/script-impl.h>

namespace fst {
namespace script {

bool Isomorphic(const FstClass &fst1, const FstClass &fst2, float delta) {
  if (!internal::ArcTypesMatch(fst1, fst2, "Isomorphic")) return false;
  IsomorphicInnerArgs iargs(fst1, fst2, delta);
  IsomorphicArgs args(iargs);
  Apply<Operation<IsomorphicArgs>>("Isomorphic", fst1.ArcType(), &args);
  return args.retval;
}

REGISTER_FST_OPERATION(Isomorphic, StdArc, IsomorphicArgs);
REGISTER_FST_OPERATION(Isomorphic, LogArc, IsomorphicArgs);
REGISTER_FST_OPERATION(Isomorphic, Log64Arc, IsomorphicArgs);

}  // namespace script
}  // namespace fst
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/ngram-fst.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // NgramFst implements a n-gram language model based upon the LOUDS data // structure. Please refer to "Unary Data Structures for Language Models" // http://research.google.com/pubs/archive/37218.pdf #ifndef FST_EXTENSIONS_NGRAM_NGRAM_FST_H_ #define FST_EXTENSIONS_NGRAM_NGRAM_FST_H_ #include <stddef.h> #include <string.h> #include <algorithm> #include <iostream> #include <string> #include <utility> #include <vector> #include <fst/compat.h> #include <fst/log.h> #include <fstream> #include <fst/extensions/ngram/bitmap-index.h> #include <fst/fstlib.h> #include <fst/mapped-file.h> namespace fst { template <class A> class NGramFst; template <class A> class NGramFstMatcher; // Instance data containing mutable state for bookkeeping repeated access to // the same state. template <class A> struct NGramFstInst { typedef typename A::Label Label; typedef typename A::StateId StateId; typedef typename A::Weight Weight; StateId state_; size_t num_futures_; size_t offset_; size_t node_; StateId node_state_; std::vector<Label> context_; StateId context_state_; NGramFstInst() : state_(kNoStateId), node_state_(kNoStateId), context_state_(kNoStateId) {} }; namespace internal { // Implementation class for LOUDS based NgramFst interface. template <class A> class NGramFstImpl : public FstImpl<A> { using FstImpl<A>::SetInputSymbols; using FstImpl<A>::SetOutputSymbols; using FstImpl<A>::SetType; using FstImpl<A>::WriteHeader; friend class ArcIterator<NGramFst<A>>; friend class NGramFstMatcher<A>; public: using FstImpl<A>::InputSymbols; using FstImpl<A>::SetProperties; using FstImpl<A>::Properties; typedef A Arc; typedef typename A::Label Label; typedef typename A::StateId StateId; typedef typename A::Weight Weight; NGramFstImpl() { SetType("ngram"); SetInputSymbols(nullptr); SetOutputSymbols(nullptr); SetProperties(kStaticProperties); } NGramFstImpl(const Fst<A> &fst, std::vector<StateId> *order_out); explicit NGramFstImpl(const Fst<A> &fst) : NGramFstImpl(fst, nullptr) {} NGramFstImpl(const NGramFstImpl &other) { FSTERROR() << "Copying NGramFst Impls is not supported, use safe = false."; SetProperties(kError, kError); } ~NGramFstImpl() override { if (owned_) { delete[] data_; } } static NGramFstImpl<A> *Read(std::istream &strm, // NOLINT const FstReadOptions &opts) { NGramFstImpl<A> *impl = new NGramFstImpl(); FstHeader hdr; if (!impl->ReadHeader(strm, opts, kMinFileVersion, &hdr)) return 0; uint64 num_states, num_futures, num_final; const size_t offset = sizeof(num_states) + sizeof(num_futures) + sizeof(num_final); // Peek at num_states and num_futures to see how much more needs to be read. strm.read(reinterpret_cast<char *>(&num_states), sizeof(num_states)); strm.read(reinterpret_cast<char *>(&num_futures), sizeof(num_futures)); strm.read(reinterpret_cast<char *>(&num_final), sizeof(num_final)); size_t size = Storage(num_states, num_futures, num_final); MappedFile *data_region = MappedFile::Allocate(size); char *data = reinterpret_cast<char *>(data_region->mutable_data()); // Copy num_states, num_futures and num_final back into data. 
memcpy(data, reinterpret_cast<char *>(&num_states), sizeof(num_states)); memcpy(data + sizeof(num_states), reinterpret_cast<char *>(&num_futures), sizeof(num_futures)); memcpy(data + sizeof(num_states) + sizeof(num_futures), reinterpret_cast<char *>(&num_final), sizeof(num_final)); strm.read(data + offset, size - offset); if (strm.fail()) { delete impl; return nullptr; } impl->Init(data, false, data_region); return impl; } bool Write(std::ostream &strm, // NOLINT const FstWriteOptions &opts) const { FstHeader hdr; hdr.SetStart(Start()); hdr.SetNumStates(num_states_); WriteHeader(strm, opts, kFileVersion, &hdr); strm.write(data_, StorageSize()); return !strm.fail(); } StateId Start() const { return start_; } Weight Final(StateId state) const { if (final_index_.Get(state)) { return final_probs_[final_index_.Rank1(state)]; } else { return Weight::Zero(); } } size_t NumArcs(StateId state, NGramFstInst<A> *inst = nullptr) const { if (inst == nullptr) { const std::pair<size_t, size_t> zeros = (state == 0) ? select_root_ : future_index_.Select0s(state); return zeros.second - zeros.first - 1; } SetInstFuture(state, inst); return inst->num_futures_ + ((state == 0) ? 0 : 1); } size_t NumInputEpsilons(StateId state) const { // State 0 has no parent, thus no backoff. if (state == 0) return 0; return 1; } size_t NumOutputEpsilons(StateId state) const { return NumInputEpsilons(state); } StateId NumStates() const { return num_states_; } void InitStateIterator(StateIteratorData<A> *data) const { data->base = 0; data->nstates = num_states_; } static size_t Storage(uint64 num_states, uint64 num_futures, uint64 num_final) { uint64 b64; Weight weight; Label label; size_t offset = sizeof(num_states) + sizeof(num_futures) + sizeof(num_final); offset += sizeof(b64) * (BitmapIndex::StorageSize(num_states * 2 + 1) + BitmapIndex::StorageSize(num_futures + num_states + 1) + BitmapIndex::StorageSize(num_states)); offset += (num_states + 1) * sizeof(label) + num_futures * sizeof(label); // Pad for alignemnt, see // http://en.wikipedia.org/wiki/Data_structure_alignment#Computing_padding offset = (offset + sizeof(weight) - 1) & ~(sizeof(weight) - 1); offset += (num_states + 1) * sizeof(weight) + num_final * sizeof(weight) + (num_futures + 1) * sizeof(weight); return offset; } void SetInstFuture(StateId state, NGramFstInst<A> *inst) const { if (inst->state_ != state) { inst->state_ = state; const std::pair<size_t, size_t> zeros = future_index_.Select0s(state); inst->num_futures_ = zeros.second - zeros.first - 1; inst->offset_ = future_index_.Rank1(zeros.first + 1); } } void SetInstNode(NGramFstInst<A> *inst) const { if (inst->node_state_ != inst->state_) { inst->node_state_ = inst->state_; inst->node_ = context_index_.Select1(inst->state_); } } void SetInstContext(NGramFstInst<A> *inst) const { SetInstNode(inst); if (inst->context_state_ != inst->state_) { inst->context_state_ = inst->state_; inst->context_.clear(); size_t node = inst->node_; while (node != 0) { inst->context_.push_back(context_words_[context_index_.Rank1(node)]); node = context_index_.Select1(context_index_.Rank0(node) - 1); } } } // Access to the underlying representation const char *GetData(size_t *data_size) const { *data_size = StorageSize(); return data_; } void Init(const char *data, bool owned, MappedFile *file = nullptr); const std::vector<Label> &GetContext(StateId s, NGramFstInst<A> *inst) const { SetInstFuture(s, inst); SetInstContext(inst); return inst->context_; } size_t StorageSize() const { return Storage(num_states_, num_futures_, 
num_final_); } void GetStates(const std::vector<Label> &context, std::vector<StateId> *states) const; private: StateId Transition(const std::vector<Label> &context, Label future) const; // Properties always true for this Fst class. static const uint64 kStaticProperties = kAcceptor | kIDeterministic | kODeterministic | kEpsilons | kIEpsilons | kOEpsilons | kILabelSorted | kOLabelSorted | kWeighted | kCyclic | kInitialAcyclic | kNotTopSorted | kAccessible | kCoAccessible | kNotString | kExpanded; // Current file format version. static const int kFileVersion = 4; // Minimum file format version supported. static const int kMinFileVersion = 4; std::unique_ptr<MappedFile> data_region_; const char *data_ = nullptr; bool owned_ = false; // True if we own data_ StateId start_ = fst::kNoStateId; uint64 num_states_ = 0; uint64 num_futures_ = 0; uint64 num_final_ = 0; std::pair<size_t, size_t> select_root_; const Label *root_children_ = nullptr; // borrowed references const uint64 *context_ = nullptr; const uint64 *future_ = nullptr; const uint64 *final_ = nullptr; const Label *context_words_ = nullptr; const Label *future_words_ = nullptr; const Weight *backoff_ = nullptr; const Weight *final_probs_ = nullptr; const Weight *future_probs_ = nullptr; BitmapIndex context_index_; BitmapIndex future_index_; BitmapIndex final_index_; }; template <typename A> inline void NGramFstImpl<A>::GetStates( const std::vector<Label> &context, std::vector<typename A::StateId> *states) const { states->clear(); states->push_back(0); typename std::vector<Label>::const_reverse_iterator cit = context.rbegin(); const Label *children = root_children_; size_t num_children = select_root_.second - 2; const Label *loc = std::lower_bound(children, children + num_children, *cit); if (loc == children + num_children || *loc != *cit) return; size_t node = 2 + loc - children; states->push_back(context_index_.Rank1(node)); if (context.size() == 1) return; size_t node_rank = context_index_.Rank1(node); std::pair<size_t, size_t> zeros = node_rank == 0 ? select_root_ : context_index_.Select0s(node_rank); size_t first_child = zeros.first + 1; ++cit; if (context_index_.Get(first_child) != false) { size_t last_child = zeros.second - 1; while (cit != context.rend()) { children = context_words_ + context_index_.Rank1(first_child); loc = std::lower_bound(children, children + last_child - first_child + 1, *cit); if (loc == children + last_child - first_child + 1 || *loc != *cit) { break; } ++cit; node = first_child + loc - children; states->push_back(context_index_.Rank1(node)); node_rank = context_index_.Rank1(node); zeros = node_rank == 0 ? 
select_root_ : context_index_.Select0s(node_rank); first_child = zeros.first + 1; if (context_index_.Get(first_child) == false) break; last_child = zeros.second - 1; } } } } // namespace internal /*****************************************************************************/ template <class A> class NGramFst : public ImplToExpandedFst<internal::NGramFstImpl<A>> { friend class ArcIterator<NGramFst<A>>; friend class NGramFstMatcher<A>; public: typedef A Arc; typedef typename A::StateId StateId; typedef typename A::Label Label; typedef typename A::Weight Weight; typedef internal::NGramFstImpl<A> Impl; explicit NGramFst(const Fst<A> &dst) : ImplToExpandedFst<Impl>(std::make_shared<Impl>(dst, nullptr)) {} NGramFst(const Fst<A> &fst, std::vector<StateId> *order_out) : ImplToExpandedFst<Impl>(std::make_shared<Impl>(fst, order_out)) {} // Because the NGramFstImpl is a const stateless data structure, there // is never a need to do anything beside copy the reference. NGramFst(const NGramFst<A> &fst, bool safe = false) : ImplToExpandedFst<Impl>(fst, false) {} NGramFst() : ImplToExpandedFst<Impl>(std::make_shared<Impl>()) {} // Non-standard constructor to initialize NGramFst directly from data. NGramFst(const char *data, bool owned) : ImplToExpandedFst<Impl>(std::make_shared<Impl>()) { GetMutableImpl()->Init(data, owned, nullptr); } // Get method that gets the data associated with Init(). const char *GetData(size_t *data_size) const { return GetImpl()->GetData(data_size); } const std::vector<Label> GetContext(StateId s) const { return GetImpl()->GetContext(s, &inst_); } // Consumes as much as possible of context from right to left, returns the // the states corresponding to the increasingly conditioned input sequence. void GetStates(const std::vector<Label> &context, std::vector<StateId> *state) const { return GetImpl()->GetStates(context, state); } size_t NumArcs(StateId s) const override { return GetImpl()->NumArcs(s, &inst_); } NGramFst<A> *Copy(bool safe = false) const override { return new NGramFst(*this, safe); } static NGramFst<A> *Read(std::istream &strm, const FstReadOptions &opts) { Impl *impl = Impl::Read(strm, opts); return impl ? 
new NGramFst<A>(std::shared_ptr<Impl>(impl)) : nullptr; } static NGramFst<A> *Read(const string &filename) { if (!filename.empty()) { std::ifstream strm(filename, std::ios_base::in | std::ios_base::binary); if (!strm.good()) { LOG(ERROR) << "NGramFst::Read: Can't open file: " << filename; return nullptr; } return Read(strm, FstReadOptions(filename)); } else { return Read(std::cin, FstReadOptions("standard input")); } } bool Write(std::ostream &strm, const FstWriteOptions &opts) const override { return GetImpl()->Write(strm, opts); } bool Write(const string &filename) const override { return Fst<A>::WriteFile(filename); } inline void InitStateIterator(StateIteratorData<A> *data) const override { GetImpl()->InitStateIterator(data); } inline void InitArcIterator(StateId s, ArcIteratorData<A> *data) const override; MatcherBase<A> *InitMatcher(MatchType match_type) const override { return new NGramFstMatcher<A>(this, match_type); } size_t StorageSize() const { return GetImpl()->StorageSize(); } static bool HasRequiredProps(const Fst<A> &fst) { static const auto props = kAcceptor | kIDeterministic | kILabelSorted | kIEpsilons | kAccessible; return fst.Properties(props, true) == props; } static bool HasRequiredStructure(const Fst<A> &fst) { if (!HasRequiredProps(fst)) { return false; } typename A::StateId unigram = fst.Start(); while (true) { // Follows epsilon arc chain to find unigram state. if (unigram == fst::kNoStateId) return false; // No unigram state. typename fst::ArcIterator<Fst<A>> aiter(fst, unigram); if (aiter.Done() || aiter.Value().ilabel != 0) break; unigram = aiter.Value().nextstate; aiter.Next(); } // Other requirement: all states other than unigram an epsilon arc. for (fst::StateIterator<Fst<A>> siter(fst); !siter.Done(); siter.Next()) { const typename A::StateId &state = siter.Value(); fst::ArcIterator<Fst<A>> aiter(fst, state); if (state != unigram) { if (aiter.Done()) return false; if (aiter.Value().ilabel != 0) return false; aiter.Next(); if (!aiter.Done() && aiter.Value().ilabel == 0) return false; } } return true; } private: using ImplToExpandedFst<Impl, ExpandedFst<A>>::GetImpl; using ImplToExpandedFst<Impl, ExpandedFst<A>>::GetMutableImpl; explicit NGramFst(std::shared_ptr<Impl> impl) : ImplToExpandedFst<Impl>(impl) {} mutable NGramFstInst<A> inst_; }; template <class A> inline void NGramFst<A>::InitArcIterator(StateId s, ArcIteratorData<A> *data) const { GetImpl()->SetInstFuture(s, &inst_); GetImpl()->SetInstNode(&inst_); data->base = new ArcIterator<NGramFst<A>>(*this, s); } namespace internal { template <typename A> NGramFstImpl<A>::NGramFstImpl(const Fst<A> &fst, std::vector<StateId> *order_out) { typedef A Arc; typedef typename Arc::Label Label; typedef typename Arc::Weight Weight; typedef typename Arc::StateId StateId; SetType("ngram"); SetInputSymbols(fst.InputSymbols()); SetOutputSymbols(fst.OutputSymbols()); SetProperties(kStaticProperties); // Check basic requirements for an OpenGrm language model Fst. if (!NGramFst<A>::HasRequiredProps(fst)) { FSTERROR() << "NGramFst only accepts OpenGrm language models as input"; SetProperties(kError, kError); return; } int64 num_states = CountStates(fst); Label *context = new Label[num_states]; // Find the unigram state by starting from the start state, following // epsilons. 
StateId unigram = fst.Start(); while (1) { if (unigram == kNoStateId) { FSTERROR() << "Could not identify unigram state"; SetProperties(kError, kError); return; } ArcIterator<Fst<A>> aiter(fst, unigram); if (aiter.Done()) { LOG(WARNING) << "Unigram state " << unigram << " has no arcs."; break; } if (aiter.Value().ilabel != 0) break; unigram = aiter.Value().nextstate; } // Each state's context is determined by the subtree it is under from the // unigram state. std::queue<std::pair<StateId, Label>> label_queue; std::vector<bool> visited(num_states); // Force an epsilon link to the start state. label_queue.push(std::make_pair(fst.Start(), 0)); for (ArcIterator<Fst<A>> aiter(fst, unigram); !aiter.Done(); aiter.Next()) { label_queue.push( std::make_pair(aiter.Value().nextstate, aiter.Value().ilabel)); } // investigate states in breadth first fashion to assign context words. while (!label_queue.empty()) { std::pair<StateId, Label> &now = label_queue.front(); if (!visited[now.first]) { context[now.first] = now.second; visited[now.first] = true; for (ArcIterator<Fst<A>> aiter(fst, now.first); !aiter.Done(); aiter.Next()) { const Arc &arc = aiter.Value(); if (arc.ilabel != 0) { label_queue.push(std::make_pair(arc.nextstate, now.second)); } } } label_queue.pop(); } visited.clear(); // The arc from the start state should be assigned an epsilon to put it // in front of the all other labels (which makes Start state 1 after // unigram which is state 0). context[fst.Start()] = 0; // Build the tree of contexts fst by reversing the epsilon arcs from fst. VectorFst<Arc> context_fst; uint64 num_final = 0; for (int i = 0; i < num_states; ++i) { if (fst.Final(i) != Weight::Zero()) { ++num_final; } context_fst.SetFinal(context_fst.AddState(), fst.Final(i)); } context_fst.SetStart(unigram); context_fst.SetInputSymbols(fst.InputSymbols()); context_fst.SetOutputSymbols(fst.OutputSymbols()); int64 num_context_arcs = 0; int64 num_futures = 0; for (StateIterator<Fst<A>> siter(fst); !siter.Done(); siter.Next()) { const StateId &state = siter.Value(); num_futures += fst.NumArcs(state) - fst.NumInputEpsilons(state); ArcIterator<Fst<A>> aiter(fst, state); if (!aiter.Done()) { const Arc &arc = aiter.Value(); // this arc goes from state to arc.nextstate, so create an arc from // arc.nextstate to state to reverse it. 
if (arc.ilabel == 0) { context_fst.AddArc(arc.nextstate, Arc(context[state], context[state], arc.weight, state)); num_context_arcs++; } } } if (num_context_arcs != context_fst.NumStates() - 1) { FSTERROR() << "Number of contexts arcs != number of states - 1"; SetProperties(kError, kError); return; } if (context_fst.NumStates() != num_states) { FSTERROR() << "Number of contexts != number of states"; SetProperties(kError, kError); return; } int64 context_props = context_fst.Properties(kIDeterministic | kILabelSorted, true); if (!(context_props & kIDeterministic)) { FSTERROR() << "Input Fst is not structured properly"; SetProperties(kError, kError); return; } if (!(context_props & kILabelSorted)) { ArcSort(&context_fst, ILabelCompare<Arc>()); } delete[] context; uint64 b64; Weight weight; Label label = kNoLabel; const size_t storage = Storage(num_states, num_futures, num_final); MappedFile *data_region = MappedFile::Allocate(storage); char *data = reinterpret_cast<char *>(data_region->mutable_data()); memset(data, 0, storage); size_t offset = 0; memcpy(data + offset, reinterpret_cast<char *>(&num_states), sizeof(num_states)); offset += sizeof(num_states); memcpy(data + offset, reinterpret_cast<char *>(&num_futures), sizeof(num_futures)); offset += sizeof(num_futures); memcpy(data + offset, reinterpret_cast<char *>(&num_final), sizeof(num_final)); offset += sizeof(num_final); uint64 *context_bits = reinterpret_cast<uint64 *>(data + offset); offset += BitmapIndex::StorageSize(num_states * 2 + 1) * sizeof(b64); uint64 *future_bits = reinterpret_cast<uint64 *>(data + offset); offset += BitmapIndex::StorageSize(num_futures + num_states + 1) * sizeof(b64); uint64 *final_bits = reinterpret_cast<uint64 *>(data + offset); offset += BitmapIndex::StorageSize(num_states) * sizeof(b64); Label *context_words = reinterpret_cast<Label *>(data + offset); offset += (num_states + 1) * sizeof(label); Label *future_words = reinterpret_cast<Label *>(data + offset); offset += num_futures * sizeof(label); offset = (offset + sizeof(weight) - 1) & ~(sizeof(weight) - 1); Weight *backoff = reinterpret_cast<Weight *>(data + offset); offset += (num_states + 1) * sizeof(weight); Weight *final_probs = reinterpret_cast<Weight *>(data + offset); offset += num_final * sizeof(weight); Weight *future_probs = reinterpret_cast<Weight *>(data + offset); int64 context_arc = 0, future_arc = 0, context_bit = 0, future_bit = 0, final_bit = 0; // pseudo-root bits BitmapIndex::Set(context_bits, context_bit++); ++context_bit; context_words[context_arc] = label; backoff[context_arc] = Weight::Zero(); context_arc++; ++future_bit; if (order_out) { order_out->clear(); order_out->resize(num_states); } std::queue<StateId> context_q; context_q.push(context_fst.Start()); StateId state_number = 0; while (!context_q.empty()) { const StateId &state = context_q.front(); if (order_out) { (*order_out)[state] = state_number; } const Weight final_weight = context_fst.Final(state); if (final_weight != Weight::Zero()) { BitmapIndex::Set(final_bits, state_number); final_probs[final_bit] = final_weight; ++final_bit; } for (ArcIterator<VectorFst<A>> aiter(context_fst, state); !aiter.Done(); aiter.Next()) { const Arc &arc = aiter.Value(); context_words[context_arc] = arc.ilabel; backoff[context_arc] = arc.weight; ++context_arc; BitmapIndex::Set(context_bits, context_bit++); context_q.push(arc.nextstate); } ++context_bit; for (ArcIterator<Fst<A>> aiter(fst, state); !aiter.Done(); aiter.Next()) { const Arc &arc = aiter.Value(); if (arc.ilabel != 0) { 
future_words[future_arc] = arc.ilabel; future_probs[future_arc] = arc.weight; ++future_arc; BitmapIndex::Set(future_bits, future_bit++); } } ++future_bit; ++state_number; context_q.pop(); } if ((state_number != num_states) || (context_bit != num_states * 2 + 1) || (context_arc != num_states) || (future_arc != num_futures) || (future_bit != num_futures + num_states + 1) || (final_bit != num_final)) { FSTERROR() << "Structure problems detected during construction"; SetProperties(kError, kError); return; } Init(data, false, data_region); } template <typename A> inline void NGramFstImpl<A>::Init(const char *data, bool owned, MappedFile *data_region) { if (owned_) { delete[] data_; } data_region_.reset(data_region); owned_ = owned; data_ = data; size_t offset = 0; num_states_ = *(reinterpret_cast<const uint64 *>(data_ + offset)); offset += sizeof(num_states_); num_futures_ = *(reinterpret_cast<const uint64 *>(data_ + offset)); offset += sizeof(num_futures_); num_final_ = *(reinterpret_cast<const uint64 *>(data_ + offset)); offset += sizeof(num_final_); uint64 bits; size_t context_bits = num_states_ * 2 + 1; size_t future_bits = num_futures_ + num_states_ + 1; context_ = reinterpret_cast<const uint64 *>(data_ + offset); offset += BitmapIndex::StorageSize(context_bits) * sizeof(bits); future_ = reinterpret_cast<const uint64 *>(data_ + offset); offset += BitmapIndex::StorageSize(future_bits) * sizeof(bits); final_ = reinterpret_cast<const uint64 *>(data_ + offset); offset += BitmapIndex::StorageSize(num_states_) * sizeof(bits); context_words_ = reinterpret_cast<const Label *>(data_ + offset); offset += (num_states_ + 1) * sizeof(*context_words_); future_words_ = reinterpret_cast<const Label *>(data_ + offset); offset += num_futures_ * sizeof(*future_words_); offset = (offset + sizeof(*backoff_) - 1) & ~(sizeof(*backoff_) - 1); backoff_ = reinterpret_cast<const Weight *>(data_ + offset); offset += (num_states_ + 1) * sizeof(*backoff_); final_probs_ = reinterpret_cast<const Weight *>(data_ + offset); offset += num_final_ * sizeof(*final_probs_); future_probs_ = reinterpret_cast<const Weight *>(data_ + offset); context_index_.BuildIndex(context_, context_bits); future_index_.BuildIndex(future_, future_bits); final_index_.BuildIndex(final_, num_states_); select_root_ = context_index_.Select0s(0); if (context_index_.Rank1(0) != 0 || select_root_.first != 1 || context_index_.Get(2) == false) { FSTERROR() << "Malformed file"; SetProperties(kError, kError); return; } root_children_ = context_words_ + context_index_.Rank1(2); start_ = 1; } template <typename A> inline typename A::StateId NGramFstImpl<A>::Transition( const std::vector<Label> &context, Label future) const { const Label *children = root_children_; size_t num_children = select_root_.second - 2; const Label *loc = std::lower_bound(children, children + num_children, future); if (loc == children + num_children || *loc != future) { return context_index_.Rank1(0); } size_t node = 2 + loc - children; size_t node_rank = context_index_.Rank1(node); std::pair<size_t, size_t> zeros = (node_rank == 0) ? 
select_root_ : context_index_.Select0s(node_rank); size_t first_child = zeros.first + 1; if (context_index_.Get(first_child) == false) { return context_index_.Rank1(node); } size_t last_child = zeros.second - 1; for (int word = context.size() - 1; word >= 0; --word) { children = context_words_ + context_index_.Rank1(first_child); loc = std::lower_bound(children, children + last_child - first_child + 1, context[word]); if (loc == children + last_child - first_child + 1 || *loc != context[word]) { break; } node = first_child + loc - children; node_rank = context_index_.Rank1(node); zeros = (node_rank == 0) ? select_root_ : context_index_.Select0s(node_rank); first_child = zeros.first + 1; if (context_index_.Get(first_child) == false) break; last_child = zeros.second - 1; } return context_index_.Rank1(node); } } // namespace internal /*****************************************************************************/ template <class A> class NGramFstMatcher : public MatcherBase<A> { public: typedef A Arc; typedef typename A::Label Label; typedef typename A::StateId StateId; typedef typename A::Weight Weight; // This makes a copy of the FST. NGramFstMatcher(const NGramFst<A> &fst, MatchType match_type) : owned_fst_(fst.Copy()), fst_(*owned_fst_), inst_(fst_.inst_), match_type_(match_type), current_loop_(false), loop_(kNoLabel, 0, A::Weight::One(), kNoStateId) { if (match_type_ == MATCH_OUTPUT) { std::swap(loop_.ilabel, loop_.olabel); } } // This doesn't copy the FST. NGramFstMatcher(const NGramFst<A> *fst, MatchType match_type) : fst_(*fst), inst_(fst_.inst_), match_type_(match_type), current_loop_(false), loop_(kNoLabel, 0, A::Weight::One(), kNoStateId) { if (match_type_ == MATCH_OUTPUT) { std::swap(loop_.ilabel, loop_.olabel); } } // This makes a copy of the FST. NGramFstMatcher(const NGramFstMatcher<A> &matcher, bool safe = false) : owned_fst_(matcher.fst_.Copy(safe)), fst_(*owned_fst_), inst_(matcher.inst_), match_type_(matcher.match_type_), current_loop_(false), loop_(kNoLabel, 0, A::Weight::One(), kNoStateId) { if (match_type_ == MATCH_OUTPUT) { std::swap(loop_.ilabel, loop_.olabel); } } NGramFstMatcher<A> *Copy(bool safe = false) const override { return new NGramFstMatcher<A>(*this, safe); } MatchType Type(bool test) const override { return match_type_; } const Fst<A> &GetFst() const override { return fst_; } uint64 Properties(uint64 props) const override { return props; } void SetState(StateId s) final { fst_.GetImpl()->SetInstFuture(s, &inst_); current_loop_ = false; } bool Find(Label label) final { const Label nolabel = kNoLabel; done_ = true; if (label == 0 || label == nolabel) { if (label == 0) { current_loop_ = true; loop_.nextstate = inst_.state_; } // The unigram state has no epsilon arc. 
if (inst_.state_ != 0) { arc_.ilabel = arc_.olabel = 0; fst_.GetImpl()->SetInstNode(&inst_); arc_.nextstate = fst_.GetImpl()->context_index_.Rank1( fst_.GetImpl()->context_index_.Select1( fst_.GetImpl()->context_index_.Rank0(inst_.node_) - 1)); arc_.weight = fst_.GetImpl()->backoff_[inst_.state_]; done_ = false; } } else { current_loop_ = false; const Label *start = fst_.GetImpl()->future_words_ + inst_.offset_; const Label *end = start + inst_.num_futures_; const Label *search = std::lower_bound(start, end, label); if (search != end && *search == label) { size_t state = search - start; arc_.ilabel = arc_.olabel = label; arc_.weight = fst_.GetImpl()->future_probs_[inst_.offset_ + state]; fst_.GetImpl()->SetInstContext(&inst_); arc_.nextstate = fst_.GetImpl()->Transition(inst_.context_, label); done_ = false; } } return !Done(); } bool Done() const final { return !current_loop_ && done_; } const Arc &Value() const final { return (current_loop_) ? loop_ : arc_; } void Next() final { if (current_loop_) { current_loop_ = false; } else { done_ = true; } } std::ptrdiff_t Priority(StateId s) final { return fst_.NumArcs(s); } private: std::unique_ptr<NGramFst<A>> owned_fst_; const NGramFst<A> &fst_; NGramFstInst<A> inst_; MatchType match_type_; // Supplied by caller bool done_; Arc arc_; bool current_loop_; // Current arc is the implicit loop Arc loop_; }; /*****************************************************************************/ // Specialization for NGramFst; see generic version in fst.h // for sample usage (but use the ProdLmFst type!). This version // should inline. template <class A> class StateIterator<NGramFst<A>> : public StateIteratorBase<A> { public: typedef typename A::StateId StateId; explicit StateIterator(const NGramFst<A> &fst) : s_(0), num_states_(fst.NumStates()) {} bool Done() const final { return s_ >= num_states_; } StateId Value() const final { return s_; } void Next() final { ++s_; } void Reset() final { s_ = 0; } private: StateId s_; StateId num_states_; }; /*****************************************************************************/ template <class A> class ArcIterator<NGramFst<A>> : public ArcIteratorBase<A> { public: typedef A Arc; typedef typename A::Label Label; typedef typename A::StateId StateId; typedef typename A::Weight Weight; ArcIterator(const NGramFst<A> &fst, StateId state) : lazy_(~0), impl_(fst.GetImpl()), i_(0), flags_(kArcValueFlags) { inst_ = fst.inst_; impl_->SetInstFuture(state, &inst_); impl_->SetInstNode(&inst_); } bool Done() const final { return i_ >= ((inst_.node_ == 0) ? inst_.num_futures_ : inst_.num_futures_ + 1); } const Arc &Value() const final { bool eps = (inst_.node_ != 0 && i_ == 0); StateId state = (inst_.node_ == 0) ? i_ : i_ - 1; if (flags_ & lazy_ & (kArcILabelValue | kArcOLabelValue)) { arc_.ilabel = arc_.olabel = eps ? 0 : impl_->future_words_[inst_.offset_ + state]; lazy_ &= ~(kArcILabelValue | kArcOLabelValue); } if (flags_ & lazy_ & kArcNextStateValue) { if (eps) { arc_.nextstate = impl_->context_index_.Rank1(impl_->context_index_.Select1( impl_->context_index_.Rank0(inst_.node_) - 1)); } else { if (lazy_ & kArcNextStateValue) { impl_->SetInstContext(&inst_); // first time only. } arc_.nextstate = impl_->Transition( inst_.context_, impl_->future_words_[inst_.offset_ + state]); } lazy_ &= ~kArcNextStateValue; } if (flags_ & lazy_ & kArcWeightValue) { arc_.weight = eps ? 
impl_->backoff_[inst_.state_] : impl_->future_probs_[inst_.offset_ + state]; lazy_ &= ~kArcWeightValue; } return arc_; } void Next() final { ++i_; lazy_ = ~0; } size_t Position() const final { return i_; } void Reset() final { i_ = 0; lazy_ = ~0; } void Seek(size_t a) final { if (i_ != a) { i_ = a; lazy_ = ~0; } } uint32 Flags() const final { return flags_; } void SetFlags(uint32 flags, uint32 mask) final { flags_ &= ~mask; flags_ |= (flags & kArcValueFlags); } private: mutable Arc arc_; mutable uint32 lazy_; const internal::NGramFstImpl<A> *impl_; // Borrowed reference. mutable NGramFstInst<A> inst_; size_t i_; uint32 flags_; }; } // namespace fst #endif // FST_EXTENSIONS_NGRAM_NGRAM_FST_H_
0
coqui_public_repos/STT-models/polish/jaco-assistant
coqui_public_repos/STT-models/polish/jaco-assistant/v0.0.1/alphabet.txt
# Each line in this file represents the Unicode codepoint (UTF-8 encoded)
# associated with a numeric label.
# A line that starts with # is a comment. You can escape it with \# if you wish
# to use '#' as a label.
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
ć
ń
ó
ś
ź
ż
ą
ę
ł
# The last (non-comment) line needs to end with a newline.
0
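The comments inside this alphabet.txt describe the file format itself: one UTF-8 label per line, lines starting with # are comments, and \# escapes a literal '#'. A minimal loader sketch for that format follows; the function name load_alphabet and the usage at the bottom are illustrative, not part of the STT codebase.

def load_alphabet(path):
    """Parse an STT-style alphabet.txt: one UTF-8 label per line, '#' starts a
    comment, '\\#' escapes a literal '#'. Returns a label -> index mapping.
    Sketch only; the function name and usage below are hypothetical."""
    labels = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if line.startswith("#"):
                continue  # comment line
            if line.startswith("\\#"):
                line = "#"  # escaped literal '#'
            labels.append(line)
    return {label: idx for idx, label in enumerate(labels)}

# Hypothetical usage: indices follow the order of non-comment lines in the file.
# char_to_id = load_alphabet("alphabet.txt")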
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/delightful_tts/phoneme_prosody_predictor.py
import torch
import torch.nn as nn  # pylint: disable=consider-using-from-import

from TTS.tts.layers.delightful_tts.conv_layers import ConvTransposed


class PhonemeProsodyPredictor(nn.Module):
    """Non-parallel Prosody Predictor inspired by: https://arxiv.org/pdf/2102.00851.pdf

    It consists of 2 layers of 1D convolutions each followed by a relu activation,
    layer norm and dropout, then finally a linear layer.

    Args:
        hidden_size (int): Size of hidden channels.
        kernel_size (int): Kernel size for the conv layers.
        dropout (float): Probability of dropout.
        bottleneck_size (int): bottleneck size for last linear layer.
        lrelu_slope (float): Slope of the leaky relu.
    """

    def __init__(
        self,
        hidden_size: int,
        kernel_size: int,
        dropout: float,
        bottleneck_size: int,
        lrelu_slope: float,
    ):
        super().__init__()
        self.d_model = hidden_size
        self.layers = nn.ModuleList(
            [
                ConvTransposed(
                    self.d_model,
                    self.d_model,
                    kernel_size=kernel_size,
                    padding=(kernel_size - 1) // 2,
                ),
                nn.LeakyReLU(lrelu_slope),
                nn.LayerNorm(self.d_model),
                nn.Dropout(dropout),
                ConvTransposed(
                    self.d_model,
                    self.d_model,
                    kernel_size=kernel_size,
                    padding=(kernel_size - 1) // 2,
                ),
                nn.LeakyReLU(lrelu_slope),
                nn.LayerNorm(self.d_model),
                nn.Dropout(dropout),
            ]
        )
        self.predictor_bottleneck = nn.Linear(self.d_model, bottleneck_size)

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """
        Shapes:
            x: :math: `[B, T, D]`
            mask: :math: `[B, T]`
        """
        mask = mask.unsqueeze(2)
        for layer in self.layers:
            x = layer(x)
        x = x.masked_fill(mask, 0.0)
        x = self.predictor_bottleneck(x)
        return x
0
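The docstring above fixes the expected tensor shapes (x: [B, T, D], mask: [B, T]). Below is a minimal usage sketch, assuming the coqui TTS package is importable; the hyperparameter values are arbitrary placeholders, not DelightfulTTS defaults.

import torch

from TTS.tts.layers.delightful_tts.phoneme_prosody_predictor import PhonemeProsodyPredictor

# Illustrative hyperparameters only; not taken from any released config.
predictor = PhonemeProsodyPredictor(
    hidden_size=384,
    kernel_size=5,
    dropout=0.1,
    bottleneck_size=256,
    lrelu_slope=0.3,
)

x = torch.randn(2, 50, 384)                  # [B, T, D] encoder outputs
mask = torch.zeros(2, 50, dtype=torch.bool)  # [B, T]; True marks padded frames
out = predictor(x, mask)                     # -> [2, 50, 256], i.e. [B, T, bottleneck_size]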
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/xtts/tokenizer.py
import os import re import textwrap from functools import cached_property import pypinyin import torch from hangul_romanize import Transliter from hangul_romanize.rule import academic from num2words import num2words from spacy.lang.ar import Arabic from spacy.lang.en import English from spacy.lang.es import Spanish from spacy.lang.ja import Japanese from spacy.lang.zh import Chinese from tokenizers import Tokenizer from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words def get_spacy_lang(lang): if lang == "zh": return Chinese() elif lang == "ja": return Japanese() elif lang == "ar": return Arabic() elif lang == "es": return Spanish() else: # For most languages, Enlish does the job return English() def split_sentence(text, lang, text_split_length=250): """Preprocess the input text""" text_splits = [] if text_split_length is not None and len(text) >= text_split_length: text_splits.append("") nlp = get_spacy_lang(lang) nlp.add_pipe("sentencizer") doc = nlp(text) for sentence in doc.sents: if len(text_splits[-1]) + len(str(sentence)) <= text_split_length: # if the last sentence + the current sentence is less than the text_split_length # then add the current sentence to the last sentence text_splits[-1] += " " + str(sentence) text_splits[-1] = text_splits[-1].lstrip() elif len(str(sentence)) > text_split_length: # if the current sentence is greater than the text_split_length for line in textwrap.wrap( str(sentence), width=text_split_length, drop_whitespace=True, break_on_hyphens=False, tabsize=1, ): text_splits.append(str(line)) else: text_splits.append(str(sentence)) if len(text_splits) > 1: if text_splits[0] == "": del text_splits[0] else: text_splits = [text.lstrip()] return text_splits _whitespace_re = re.compile(r"\s+") # List of (regular expression, replacement) pairs for abbreviations: _abbreviations = { "en": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("mrs", "misess"), ("mr", "mister"), ("dr", "doctor"), ("st", "saint"), ("co", "company"), ("jr", "junior"), ("maj", "major"), ("gen", "general"), ("drs", "doctors"), ("rev", "reverend"), ("lt", "lieutenant"), ("hon", "honorable"), ("sgt", "sergeant"), ("capt", "captain"), ("esq", "esquire"), ("ltd", "limited"), ("col", "colonel"), ("ft", "fort"), ] ], "es": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("sra", "señora"), ("sr", "señor"), ("dr", "doctor"), ("dra", "doctora"), ("st", "santo"), ("co", "compañía"), ("jr", "junior"), ("ltd", "limitada"), ] ], "fr": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("mme", "madame"), ("mr", "monsieur"), ("dr", "docteur"), ("st", "saint"), ("co", "compagnie"), ("jr", "junior"), ("ltd", "limitée"), ] ], "de": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("fr", "frau"), ("dr", "doktor"), ("st", "sankt"), ("co", "firma"), ("jr", "junior"), ] ], "pt": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("sra", "senhora"), ("sr", "senhor"), ("dr", "doutor"), ("dra", "doutora"), ("st", "santo"), ("co", "companhia"), ("jr", "júnior"), ("ltd", "limitada"), ] ], "it": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ # ("sig.ra", "signora"), ("sig", "signore"), ("dr", "dottore"), ("st", "santo"), ("co", "compagnia"), ("jr", "junior"), ("ltd", "limitata"), ] ], "pl": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("p", "pani"), ("m", "pan"), ("dr", "doktor"), ("sw", "święty"), ("jr", "junior"), ] ], "ar": [ (re.compile("\\b%s\\." 
% x[0], re.IGNORECASE), x[1]) for x in [ # There are not many common abbreviations in Arabic as in English. ] ], "zh": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ # Chinese doesn't typically use abbreviations in the same way as Latin-based scripts. ] ], "cs": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("dr", "doktor"), # doctor ("ing", "inženýr"), # engineer ("p", "pan"), # Could also map to pani for woman but no easy way to do it # Other abbreviations would be specialized and not as common. ] ], "ru": [ (re.compile("\\b%s\\b" % x[0], re.IGNORECASE), x[1]) for x in [ ("г-жа", "госпожа"), # Mrs. ("г-н", "господин"), # Mr. ("д-р", "доктор"), # doctor # Other abbreviations are less common or specialized. ] ], "nl": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("dhr", "de heer"), # Mr. ("mevr", "mevrouw"), # Mrs. ("dr", "dokter"), # doctor ("jhr", "jonkheer"), # young lord or nobleman # Dutch uses more abbreviations, but these are the most common ones. ] ], "tr": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("b", "bay"), # Mr. ("byk", "büyük"), # büyük ("dr", "doktor"), # doctor # Add other Turkish abbreviations here if needed. ] ], "hu": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("dr", "doktor"), # doctor ("b", "bácsi"), # Mr. ("nőv", "nővér"), # nurse # Add other Hungarian abbreviations here if needed. ] ], "ko": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ # Korean doesn't typically use abbreviations in the same way as Latin-based scripts. ] ], } def expand_abbreviations_multilingual(text, lang="en"): for regex, replacement in _abbreviations[lang]: text = re.sub(regex, replacement, text) return text _symbols_multilingual = { "en": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " and "), ("@", " at "), ("%", " percent "), ("#", " hash "), ("$", " dollar "), ("£", " pound "), ("°", " degree "), ] ], "es": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " y "), ("@", " arroba "), ("%", " por ciento "), ("#", " numeral "), ("$", " dolar "), ("£", " libra "), ("°", " grados "), ] ], "fr": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " et "), ("@", " arobase "), ("%", " pour cent "), ("#", " dièse "), ("$", " dollar "), ("£", " livre "), ("°", " degrés "), ] ], "de": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " und "), ("@", " at "), ("%", " prozent "), ("#", " raute "), ("$", " dollar "), ("£", " pfund "), ("°", " grad "), ] ], "pt": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " e "), ("@", " arroba "), ("%", " por cento "), ("#", " cardinal "), ("$", " dólar "), ("£", " libra "), ("°", " graus "), ] ], "it": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " e "), ("@", " chiocciola "), ("%", " per cento "), ("#", " cancelletto "), ("$", " dollaro "), ("£", " sterlina "), ("°", " gradi "), ] ], "pl": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " i "), ("@", " małpa "), ("%", " procent "), ("#", " krzyżyk "), ("$", " dolar "), ("£", " funt "), ("°", " stopnie "), ] ], "ar": [ # Arabic (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " و "), ("@", " على "), ("%", " في المئة "), ("#", " رقم "), ("$", " دولار "), ("£", " جنيه "), ("°", " درجة "), ] ], "zh": [ # Chinese (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), 
x[1]) for x in [ ("&", " 和 "), ("@", " 在 "), ("%", " 百分之 "), ("#", " 号 "), ("$", " 美元 "), ("£", " 英镑 "), ("°", " 度 "), ] ], "cs": [ # Czech (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " a "), ("@", " na "), ("%", " procento "), ("#", " křížek "), ("$", " dolar "), ("£", " libra "), ("°", " stupně "), ] ], "ru": [ # Russian (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " и "), ("@", " собака "), ("%", " процентов "), ("#", " номер "), ("$", " доллар "), ("£", " фунт "), ("°", " градус "), ] ], "nl": [ # Dutch (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " en "), ("@", " bij "), ("%", " procent "), ("#", " hekje "), ("$", " dollar "), ("£", " pond "), ("°", " graden "), ] ], "tr": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " ve "), ("@", " at "), ("%", " yüzde "), ("#", " diyez "), ("$", " dolar "), ("£", " sterlin "), ("°", " derece "), ] ], "hu": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " és "), ("@", " kukac "), ("%", " százalék "), ("#", " kettőskereszt "), ("$", " dollár "), ("£", " font "), ("°", " fok "), ] ], "ko": [ # Korean (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " 그리고 "), ("@", " 에 "), ("%", " 퍼센트 "), ("#", " 번호 "), ("$", " 달러 "), ("£", " 파운드 "), ("°", " 도 "), ] ], } def expand_symbols_multilingual(text, lang="en"): for regex, replacement in _symbols_multilingual[lang]: text = re.sub(regex, replacement, text) text = text.replace(" ", " ") # Ensure there are no double spaces return text.strip() _ordinal_re = { "en": re.compile(r"([0-9]+)(st|nd|rd|th)"), "es": re.compile(r"([0-9]+)(º|ª|er|o|a|os|as)"), "fr": re.compile(r"([0-9]+)(º|ª|er|re|e|ème)"), "de": re.compile(r"([0-9]+)(st|nd|rd|th|º|ª|\.(?=\s|$))"), "pt": re.compile(r"([0-9]+)(º|ª|o|a|os|as)"), "it": re.compile(r"([0-9]+)(º|°|ª|o|a|i|e)"), "pl": re.compile(r"([0-9]+)(º|ª|st|nd|rd|th)"), "ar": re.compile(r"([0-9]+)(ون|ين|ث|ر|ى)"), "cs": re.compile(r"([0-9]+)\.(?=\s|$)"), # In Czech, a dot is often used after the number to indicate ordinals. "ru": re.compile(r"([0-9]+)(-й|-я|-е|-ое|-ье|-го)"), "nl": re.compile(r"([0-9]+)(de|ste|e)"), "tr": re.compile(r"([0-9]+)(\.|inci|nci|uncu|üncü|\.)"), "hu": re.compile(r"([0-9]+)(\.|adik|edik|odik|edik|ödik|ödike|ik)"), "ko": re.compile(r"([0-9]+)(번째|번|차|째)"), } _number_re = re.compile(r"[0-9]+") _currency_re = { "USD": re.compile(r"((\$[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+\$))"), "GBP": re.compile(r"((£[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+£))"), "EUR": re.compile(r"(([0-9\.\,]*[0-9]+€)|((€[0-9\.\,]*[0-9]+)))"), } _comma_number_re = re.compile(r"\b\d{1,3}(,\d{3})*(\.\d+)?\b") _dot_number_re = re.compile(r"\b\d{1,3}(.\d{3})*(\,\d+)?\b") _decimal_number_re = re.compile(r"([0-9]+[.,][0-9]+)") def _remove_commas(m): text = m.group(0) if "," in text: text = text.replace(",", "") return text def _remove_dots(m): text = m.group(0) if "." 
in text: text = text.replace(".", "") return text def _expand_decimal_point(m, lang="en"): amount = m.group(1).replace(",", ".") return num2words(float(amount), lang=lang if lang != "cs" else "cz") def _expand_currency(m, lang="en", currency="USD"): amount = float((re.sub(r"[^\d.]", "", m.group(0).replace(",", ".")))) full_amount = num2words(amount, to="currency", currency=currency, lang=lang if lang != "cs" else "cz") and_equivalents = { "en": ", ", "es": " con ", "fr": " et ", "de": " und ", "pt": " e ", "it": " e ", "pl": ", ", "cs": ", ", "ru": ", ", "nl": ", ", "ar": ", ", "tr": ", ", "hu": ", ", "ko": ", ", } if amount.is_integer(): last_and = full_amount.rfind(and_equivalents[lang]) if last_and != -1: full_amount = full_amount[:last_and] return full_amount def _expand_ordinal(m, lang="en"): return num2words(int(m.group(1)), ordinal=True, lang=lang if lang != "cs" else "cz") def _expand_number(m, lang="en"): return num2words(int(m.group(0)), lang=lang if lang != "cs" else "cz") def expand_numbers_multilingual(text, lang="en"): if lang == "zh": text = zh_num2words()(text) else: if lang in ["en", "ru"]: text = re.sub(_comma_number_re, _remove_commas, text) else: text = re.sub(_dot_number_re, _remove_dots, text) try: text = re.sub(_currency_re["GBP"], lambda m: _expand_currency(m, lang, "GBP"), text) text = re.sub(_currency_re["USD"], lambda m: _expand_currency(m, lang, "USD"), text) text = re.sub(_currency_re["EUR"], lambda m: _expand_currency(m, lang, "EUR"), text) except: pass if lang != "tr": text = re.sub(_decimal_number_re, lambda m: _expand_decimal_point(m, lang), text) text = re.sub(_ordinal_re[lang], lambda m: _expand_ordinal(m, lang), text) text = re.sub(_number_re, lambda m: _expand_number(m, lang), text) return text def lowercase(text): return text.lower() def collapse_whitespace(text): return re.sub(_whitespace_re, " ", text) def multilingual_cleaners(text, lang): text = text.replace('"', "") if lang == "tr": text = text.replace("İ", "i") text = text.replace("Ö", "ö") text = text.replace("Ü", "ü") text = lowercase(text) text = expand_numbers_multilingual(text, lang) text = expand_abbreviations_multilingual(text, lang) text = expand_symbols_multilingual(text, lang=lang) text = collapse_whitespace(text) return text def basic_cleaners(text): """Basic pipeline that lowercases and collapses whitespace without transliteration.""" text = lowercase(text) text = collapse_whitespace(text) return text def chinese_transliterate(text): return "".join( [p[0] for p in pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True)] ) def japanese_cleaners(text, katsu): text = katsu.romaji(text) text = lowercase(text) return text def korean_transliterate(text): r = Transliter(academic) return r.translit(text) DEFAULT_VOCAB_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../data/tokenizer.json") class VoiceBpeTokenizer: def __init__(self, vocab_file=None): self.tokenizer = None if vocab_file is not None: self.tokenizer = Tokenizer.from_file(vocab_file) self.char_limits = { "en": 250, "de": 253, "fr": 273, "es": 239, "it": 213, "pt": 203, "pl": 224, "zh": 82, "ar": 166, "cs": 186, "ru": 182, "nl": 251, "tr": 226, "ja": 71, "hu": 224, "ko": 95, } @cached_property def katsu(self): import cutlet return cutlet.Cutlet() def check_input_length(self, txt, lang): lang = lang.split("-")[0] # remove the region limit = self.char_limits.get(lang, 250) if len(txt) > limit: print( f"[!] 
Warning: The text length exceeds the character limit of {limit} for language '{lang}', this might cause truncated audio." ) def preprocess_text(self, txt, lang): if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh", "ko"}: txt = multilingual_cleaners(txt, lang) if lang == "zh": txt = chinese_transliterate(txt) if lang == "ko": txt = korean_transliterate(txt) elif lang == "ja": txt = japanese_cleaners(txt, self.katsu) elif lang == "hi": # @manmay will implement this txt = basic_cleaners(txt) else: raise NotImplementedError(f"Language '{lang}' is not supported.") return txt def encode(self, txt, lang): lang = lang.split("-")[0] # remove the region self.check_input_length(txt, lang) txt = self.preprocess_text(txt, lang) lang = "zh-cn" if lang == "zh" else lang txt = f"[{lang}]{txt}" txt = txt.replace(" ", "[SPACE]") return self.tokenizer.encode(txt).ids def decode(self, seq): if isinstance(seq, torch.Tensor): seq = seq.cpu().numpy() txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(" ", "") txt = txt.replace("[SPACE]", " ") txt = txt.replace("[STOP]", "") txt = txt.replace("[UNK]", "") return txt def __len__(self): return self.tokenizer.get_vocab_size() def get_number_tokens(self): return max(self.tokenizer.get_vocab().values()) + 1 def test_expand_numbers_multilingual(): test_cases = [ # English ("In 12.5 seconds.", "In twelve point five seconds.", "en"), ("There were 50 soldiers.", "There were fifty soldiers.", "en"), ("This is a 1st test", "This is a first test", "en"), ("That will be $20 sir.", "That will be twenty dollars sir.", "en"), ("That will be 20€ sir.", "That will be twenty euro sir.", "en"), ("That will be 20.15€ sir.", "That will be twenty euro, fifteen cents sir.", "en"), ("That's 100,000.5.", "That's one hundred thousand point five.", "en"), # French ("En 12,5 secondes.", "En douze virgule cinq secondes.", "fr"), ("Il y avait 50 soldats.", "Il y avait cinquante soldats.", "fr"), ("Ceci est un 1er test", "Ceci est un premier test", "fr"), ("Cela vous fera $20 monsieur.", "Cela vous fera vingt dollars monsieur.", "fr"), ("Cela vous fera 20€ monsieur.", "Cela vous fera vingt euros monsieur.", "fr"), ("Cela vous fera 20,15€ monsieur.", "Cela vous fera vingt euros et quinze centimes monsieur.", "fr"), ("Ce sera 100.000,5.", "Ce sera cent mille virgule cinq.", "fr"), # German ("In 12,5 Sekunden.", "In zwölf Komma fünf Sekunden.", "de"), ("Es gab 50 Soldaten.", "Es gab fünfzig Soldaten.", "de"), ("Dies ist ein 1. 
Test", "Dies ist ein erste Test", "de"), # Issue with gender ("Das macht $20 Herr.", "Das macht zwanzig Dollar Herr.", "de"), ("Das macht 20€ Herr.", "Das macht zwanzig Euro Herr.", "de"), ("Das macht 20,15€ Herr.", "Das macht zwanzig Euro und fünfzehn Cent Herr.", "de"), # Spanish ("En 12,5 segundos.", "En doce punto cinco segundos.", "es"), ("Había 50 soldados.", "Había cincuenta soldados.", "es"), ("Este es un 1er test", "Este es un primero test", "es"), ("Eso le costará $20 señor.", "Eso le costará veinte dólares señor.", "es"), ("Eso le costará 20€ señor.", "Eso le costará veinte euros señor.", "es"), ("Eso le costará 20,15€ señor.", "Eso le costará veinte euros con quince céntimos señor.", "es"), # Italian ("In 12,5 secondi.", "In dodici virgola cinque secondi.", "it"), ("C'erano 50 soldati.", "C'erano cinquanta soldati.", "it"), ("Questo è un 1° test", "Questo è un primo test", "it"), ("Ti costerà $20 signore.", "Ti costerà venti dollari signore.", "it"), ("Ti costerà 20€ signore.", "Ti costerà venti euro signore.", "it"), ("Ti costerà 20,15€ signore.", "Ti costerà venti euro e quindici centesimi signore.", "it"), # Portuguese ("Em 12,5 segundos.", "Em doze vírgula cinco segundos.", "pt"), ("Havia 50 soldados.", "Havia cinquenta soldados.", "pt"), ("Este é um 1º teste", "Este é um primeiro teste", "pt"), ("Isso custará $20 senhor.", "Isso custará vinte dólares senhor.", "pt"), ("Isso custará 20€ senhor.", "Isso custará vinte euros senhor.", "pt"), ( "Isso custará 20,15€ senhor.", "Isso custará vinte euros e quinze cêntimos senhor.", "pt", ), # "cêntimos" should be "centavos" num2words issue # Polish ("W 12,5 sekundy.", "W dwanaście przecinek pięć sekundy.", "pl"), ("Było 50 żołnierzy.", "Było pięćdziesiąt żołnierzy.", "pl"), ("To będzie kosztować 20€ panie.", "To będzie kosztować dwadzieścia euro panie.", "pl"), ("To będzie kosztować 20,15€ panie.", "To będzie kosztować dwadzieścia euro, piętnaście centów panie.", "pl"), # Arabic ("في الـ 12,5 ثانية.", "في الـ اثنا عشر , خمسون ثانية.", "ar"), ("كان هناك 50 جنديًا.", "كان هناك خمسون جنديًا.", "ar"), # ("ستكون النتيجة $20 يا سيد.", 'ستكون النتيجة عشرون دولار يا سيد.', 'ar'), # $ and € are mising from num2words # ("ستكون النتيجة 20€ يا سيد.", 'ستكون النتيجة عشرون يورو يا سيد.', 'ar'), # Czech ("Za 12,5 vteřiny.", "Za dvanáct celá pět vteřiny.", "cs"), ("Bylo tam 50 vojáků.", "Bylo tam padesát vojáků.", "cs"), ("To bude stát 20€ pane.", "To bude stát dvacet euro pane.", "cs"), ("To bude 20.15€ pane.", "To bude dvacet euro, patnáct centů pane.", "cs"), # Russian ("Через 12.5 секунды.", "Через двенадцать запятая пять секунды.", "ru"), ("Там было 50 солдат.", "Там было пятьдесят солдат.", "ru"), ("Это будет 20.15€ сэр.", "Это будет двадцать евро, пятнадцать центов сэр.", "ru"), ("Это будет стоить 20€ господин.", "Это будет стоить двадцать евро господин.", "ru"), # Dutch ("In 12,5 seconden.", "In twaalf komma vijf seconden.", "nl"), ("Er waren 50 soldaten.", "Er waren vijftig soldaten.", "nl"), ("Dat wordt dan $20 meneer.", "Dat wordt dan twintig dollar meneer.", "nl"), ("Dat wordt dan 20€ meneer.", "Dat wordt dan twintig euro meneer.", "nl"), # Chinese (Simplified) ("在12.5秒内", "在十二点五秒内", "zh"), ("有50名士兵", "有五十名士兵", "zh"), # ("那将是$20先生", '那将是二十美元先生', 'zh'), currency doesn't work # ("那将是20€先生", '那将是二十欧元先生', 'zh'), # Turkish # ("12,5 saniye içinde.", 'On iki virgül beş saniye içinde.', 'tr'), # decimal doesn't work for TR ("50 asker vardı.", "elli asker vardı.", "tr"), ("Bu 1. 
test", "Bu birinci test", "tr"), # ("Bu 100.000,5.", 'Bu yüz bin virgül beş.', 'tr'), # Hungarian ("12,5 másodperc alatt.", "tizenkettő egész öt tized másodperc alatt.", "hu"), ("50 katona volt.", "ötven katona volt.", "hu"), ("Ez az 1. teszt", "Ez az első teszt", "hu"), # Korean ("12.5 초 안에.", "십이 점 다섯 초 안에.", "ko"), ("50 명의 병사가 있었다.", "오십 명의 병사가 있었다.", "ko"), ("이것은 1 번째 테스트입니다", "이것은 첫 번째 테스트입니다", "ko"), ] for a, b, lang in test_cases: out = expand_numbers_multilingual(a, lang=lang) assert out == b, f"'{out}' vs '{b}'" def test_abbreviations_multilingual(): test_cases = [ # English ("Hello Mr. Smith.", "Hello mister Smith.", "en"), ("Dr. Jones is here.", "doctor Jones is here.", "en"), # Spanish ("Hola Sr. Garcia.", "Hola señor Garcia.", "es"), ("La Dra. Martinez es muy buena.", "La doctora Martinez es muy buena.", "es"), # French ("Bonjour Mr. Dupond.", "Bonjour monsieur Dupond.", "fr"), ("Mme. Moreau est absente aujourd'hui.", "madame Moreau est absente aujourd'hui.", "fr"), # German ("Frau Dr. Müller ist sehr klug.", "Frau doktor Müller ist sehr klug.", "de"), # Portuguese ("Olá Sr. Silva.", "Olá senhor Silva.", "pt"), ("Dra. Costa, você está disponível?", "doutora Costa, você está disponível?", "pt"), # Italian ("Buongiorno, Sig. Rossi.", "Buongiorno, signore Rossi.", "it"), # ("Sig.ra Bianchi, posso aiutarti?", 'signora Bianchi, posso aiutarti?', 'it'), # Issue with matching that pattern # Polish ("Dzień dobry, P. Kowalski.", "Dzień dobry, pani Kowalski.", "pl"), ("M. Nowak, czy mogę zadać pytanie?", "pan Nowak, czy mogę zadać pytanie?", "pl"), # Czech ("P. Novák", "pan Novák", "cs"), ("Dr. Vojtěch", "doktor Vojtěch", "cs"), # Dutch ("Dhr. Jansen", "de heer Jansen", "nl"), ("Mevr. de Vries", "mevrouw de Vries", "nl"), # Russian ("Здравствуйте Г-н Иванов.", "Здравствуйте господин Иванов.", "ru"), ("Д-р Смирнов здесь, чтобы увидеть вас.", "доктор Смирнов здесь, чтобы увидеть вас.", "ru"), # Turkish ("Merhaba B. Yılmaz.", "Merhaba bay Yılmaz.", "tr"), ("Dr. Ayşe burada.", "doktor Ayşe burada.", "tr"), # Hungarian ("Dr. 
Szabó itt van.", "doktor Szabó itt van.", "hu"), ] for a, b, lang in test_cases: out = expand_abbreviations_multilingual(a, lang=lang) assert out == b, f"'{out}' vs '{b}'" def test_symbols_multilingual(): test_cases = [ ("I have 14% battery", "I have 14 percent battery", "en"), ("Te veo @ la fiesta", "Te veo arroba la fiesta", "es"), ("J'ai 14° de fièvre", "J'ai 14 degrés de fièvre", "fr"), ("Die Rechnung beträgt £ 20", "Die Rechnung beträgt pfund 20", "de"), ("O meu email é ana&joao@gmail.com", "O meu email é ana e joao arroba gmail.com", "pt"), ("linguaggio di programmazione C#", "linguaggio di programmazione C cancelletto", "it"), ("Moja temperatura to 36.6°", "Moja temperatura to 36.6 stopnie", "pl"), ("Mám 14% baterie", "Mám 14 procento baterie", "cs"), ("Těším se na tebe @ party", "Těším se na tebe na party", "cs"), ("У меня 14% заряда", "У меня 14 процентов заряда", "ru"), ("Я буду @ дома", "Я буду собака дома", "ru"), ("Ik heb 14% batterij", "Ik heb 14 procent batterij", "nl"), ("Ik zie je @ het feest", "Ik zie je bij het feest", "nl"), ("لدي 14% في البطارية", "لدي 14 في المئة في البطارية", "ar"), ("我的电量为 14%", "我的电量为 14 百分之", "zh"), ("Pilim %14 dolu.", "Pilim yüzde 14 dolu.", "tr"), ("Az akkumulátorom töltöttsége 14%", "Az akkumulátorom töltöttsége 14 százalék", "hu"), ("배터리 잔량이 14%입니다.", "배터리 잔량이 14 퍼센트입니다.", "ko"), ] for a, b, lang in test_cases: out = expand_symbols_multilingual(a, lang=lang) assert out == b, f"'{out}' vs '{b}'" if __name__ == "__main__": test_expand_numbers_multilingual() test_abbreviations_multilingual() test_symbols_multilingual()
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/script/fstscript.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// The FST script interface permits users to interact with FSTs without knowing
// their arc type. It does this by mapping compile-time polymorphism (in the
// form of arc-templated FST types) onto a shared virtual interface. It also
// supports arc extension via a DSO interface. Due to the overhead of virtual
// dispatch and registered function lookups, the script API is somewhat slower
// than the library API provided by types like StdVectorFst, but has the
// advantage that it is designed not to crash (and to provide useful debugging
// information) upon common user errors like passing invalid indices or
// attempting comparison of incompatible FSTs. It is used both by the FST
// binaries and the Python extension.
//
// This header includes all of the FST script functionality.

#ifndef FST_SCRIPT_FSTSCRIPT_H_
#define FST_SCRIPT_FSTSCRIPT_H_

// Major classes
#include <fst/script/arciterator-class.h>
#include <fst/script/encodemapper-class.h>
#include <fst/script/fst-class.h>
#include <fst/script/stateiterator-class.h>
#include <fst/script/text-io.h>
#include <fst/script/weight-class.h>

// Flag-to-enum parsers.
#include <fst/script/getters.h>

// Templates like Operation<> and Apply<>.
#include <fst/script/script-impl.h>

// Operations.
#include <fst/script/arcsort.h>
#include <fst/script/closure.h>
#include <fst/script/compile.h>
#include <fst/script/compose.h>
#include <fst/script/concat.h>
#include <fst/script/connect.h>
#include <fst/script/convert.h>
#include <fst/script/decode.h>
#include <fst/script/determinize.h>
#include <fst/script/difference.h>
#include <fst/script/disambiguate.h>
#include <fst/script/draw.h>
#include <fst/script/encode.h>
#include <fst/script/epsnormalize.h>
#include <fst/script/equal.h>
#include <fst/script/equivalent.h>
#include <fst/script/info.h>
#include <fst/script/intersect.h>
#include <fst/script/invert.h>
#include <fst/script/isomorphic.h>
#include <fst/script/map.h>
#include <fst/script/minimize.h>
#include <fst/script/print.h>
#include <fst/script/project.h>
#include <fst/script/prune.h>
#include <fst/script/push.h>
#include <fst/script/randequivalent.h>
#include <fst/script/randgen.h>
#include <fst/script/relabel.h>
#include <fst/script/replace.h>
#include <fst/script/reverse.h>
#include <fst/script/reweight.h>
#include <fst/script/rmepsilon.h>
#include <fst/script/shortest-distance.h>
#include <fst/script/shortest-path.h>
#include <fst/script/synchronize.h>
#include <fst/script/topsort.h>
#include <fst/script/union.h>
#include <fst/script/verify.h>

// This class is necessary because registering each of the operations
// separately overfills the stack, as there are so many of them.
namespace fst { namespace script { template <class Arc> class AllFstOperationsRegisterer { public: AllFstOperationsRegisterer() { RegisterBatch1(); RegisterBatch2(); } private: void RegisterBatch1() { REGISTER_FST_OPERATION(ArcSort, Arc, ArcSortArgs); REGISTER_FST_OPERATION(Closure, Arc, ClosureArgs); REGISTER_FST_OPERATION(CompileFstInternal, Arc, CompileFstArgs); REGISTER_FST_OPERATION(Compose, Arc, ComposeArgs); REGISTER_FST_OPERATION(Concat, Arc, ConcatArgs1); REGISTER_FST_OPERATION(Concat, Arc, ConcatArgs2); REGISTER_FST_OPERATION(Connect, Arc, MutableFstClass); REGISTER_FST_OPERATION(Convert, Arc, ConvertArgs); REGISTER_FST_OPERATION(Decode, Arc, DecodeArgs1); REGISTER_FST_OPERATION(Decode, Arc, DecodeArgs2); REGISTER_FST_OPERATION(Determinize, Arc, DeterminizeArgs); REGISTER_FST_OPERATION(Difference, Arc, DifferenceArgs); REGISTER_FST_OPERATION(Disambiguate, Arc, DisambiguateArgs); REGISTER_FST_OPERATION(DrawFst, Arc, FstDrawerArgs); REGISTER_FST_OPERATION(Encode, Arc, EncodeArgs1); REGISTER_FST_OPERATION(Encode, Arc, EncodeArgs2); REGISTER_FST_OPERATION(EpsNormalize, Arc, EpsNormalizeArgs); REGISTER_FST_OPERATION(Equal, Arc, EqualArgs); REGISTER_FST_OPERATION(Equivalent, Arc, EquivalentArgs); REGISTER_FST_OPERATION(PrintFstInfo, Arc, InfoArgs); REGISTER_FST_OPERATION(GetFstInfo, Arc, GetInfoArgs); REGISTER_FST_OPERATION(InitArcIteratorClass, Arc, InitArcIteratorClassArgs); REGISTER_FST_OPERATION(InitEncodeMapperClass, Arc, InitEncodeMapperClassArgs); REGISTER_FST_OPERATION(InitMutableArcIteratorClass, Arc, InitMutableArcIteratorClassArgs); REGISTER_FST_OPERATION(InitStateIteratorClass, Arc, InitStateIteratorClassArgs); } void RegisterBatch2() { REGISTER_FST_OPERATION(Intersect, Arc, IntersectArgs); REGISTER_FST_OPERATION(Invert, Arc, MutableFstClass); REGISTER_FST_OPERATION(Map, Arc, MapArgs); REGISTER_FST_OPERATION(Minimize, Arc, MinimizeArgs); REGISTER_FST_OPERATION(PrintFst, Arc, FstPrinterArgs); REGISTER_FST_OPERATION(Project, Arc, ProjectArgs); REGISTER_FST_OPERATION(Prune, Arc, PruneArgs1); REGISTER_FST_OPERATION(Prune, Arc, PruneArgs2); REGISTER_FST_OPERATION(Push, Arc, PushArgs1); REGISTER_FST_OPERATION(Push, Arc, PushArgs2); REGISTER_FST_OPERATION(RandEquivalent, Arc, RandEquivalentArgs); REGISTER_FST_OPERATION(RandGen, Arc, RandGenArgs); REGISTER_FST_OPERATION(Relabel, Arc, RelabelArgs1); REGISTER_FST_OPERATION(Relabel, Arc, RelabelArgs2); REGISTER_FST_OPERATION(Replace, Arc, ReplaceArgs); REGISTER_FST_OPERATION(Reverse, Arc, ReverseArgs); REGISTER_FST_OPERATION(Reweight, Arc, ReweightArgs); REGISTER_FST_OPERATION(RmEpsilon, Arc, RmEpsilonArgs); REGISTER_FST_OPERATION(ShortestDistance, Arc, ShortestDistanceArgs1); REGISTER_FST_OPERATION(ShortestDistance, Arc, ShortestDistanceArgs2); REGISTER_FST_OPERATION(ShortestPath, Arc, ShortestPathArgs); REGISTER_FST_OPERATION(Synchronize, Arc, SynchronizeArgs); REGISTER_FST_OPERATION(TopSort, Arc, TopSortArgs); REGISTER_FST_OPERATION(Union, Arc, UnionArgs); REGISTER_FST_OPERATION(Verify, Arc, VerifyArgs); } }; } // namespace script } // namespace fst #define REGISTER_FST_OPERATIONS(Arc) \ AllFstOperationsRegisterer<Arc> register_all_fst_operations##Arc; #endif // FST_SCRIPT_FSTSCRIPT_H_
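// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original header. It shows how
// a separate source file (for instance one compiled into an arc-extension
// DSO) might use the macro defined above to register every scripting
// operation for a custom arc type. "MyArc" and "my-arc.h" are hypothetical;
// the arc type is assumed to provide the usual OpenFST Arc interface
// (Label, StateId, Weight, static Type()).
// ---------------------------------------------------------------------------
#include <fst/script/fstscript.h>

#include "my-arc.h"  // Hypothetical header defining MyArc.

namespace fst {
namespace script {

// Expands to `AllFstOperationsRegisterer<MyArc> register_all_fst_operationsMyArc;`,
// whose constructor calls REGISTER_FST_OPERATION for ArcSort, Closure,
// Compose, and the rest of the operations above with MyArc, making them
// reachable through the arc-type-agnostic FstClass API and the command-line
// binaries.
REGISTER_FST_OPERATIONS(MyArc);

}  // namespace script
}  // namespace fst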
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/openfst.sln
 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 VisualStudioVersion = 15.0.27130.2026 MinimumVisualStudioVersion = 10.0.40219.1 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libfst", "src\lib\libfst.vcxproj", "{DE80EFEC-9ED9-4631-BD96-8568C31ED26D}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Control and build files", "Control and build files", "{F14A9AF0-0D79-4553-AA8E-B3905A9B6431}" ProjectSection(SolutionItems) = preProject .editorconfig = .editorconfig .gitignore = .gitignore src\openfst-multibin.targets = src\openfst-multibin.targets src\openfst.props = src\openfst.props src\openfst.targets = src\openfst.targets EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libfstscript", "src\script\libfstscript.vcxproj", "{111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bin", "src\bin\bin.vcxproj", "{84657A19-CAF2-49E8-8DB3-A428C19F460D}" ProjectSection(ProjectDependencies) = postProject {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6} = {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6} EndProjectSection EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "!! READ ME BEFORE BUILD !!", "!! READ ME BEFORE BUILD !!", "{3BAF0BB0-34BF-4E28-BC17-A80D7CF4130F}" ProjectSection(SolutionItems) = preProject src\openfst.user.props = src\openfst.user.props EndProjectSection EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Debug|x64.ActiveCfg = Debug|x64 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Debug|x64.Build.0 = Debug|x64 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Debug|x86.ActiveCfg = Debug|Win32 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Debug|x86.Build.0 = Debug|Win32 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Release|x64.ActiveCfg = Release|x64 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Release|x64.Build.0 = Release|x64 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Release|x86.ActiveCfg = Release|Win32 {DE80EFEC-9ED9-4631-BD96-8568C31ED26D}.Release|x86.Build.0 = Release|Win32 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Debug|x64.ActiveCfg = Debug|x64 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Debug|x64.Build.0 = Debug|x64 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Debug|x86.ActiveCfg = Debug|Win32 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Debug|x86.Build.0 = Debug|Win32 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Release|x64.ActiveCfg = Release|x64 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Release|x64.Build.0 = Release|x64 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Release|x86.ActiveCfg = Release|Win32 {111F46ED-DA1F-469B-B912-BA2ACC2FF8E6}.Release|x86.Build.0 = Release|Win32 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Debug|x64.ActiveCfg = Debug|x64 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Debug|x64.Build.0 = Debug|x64 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Debug|x86.ActiveCfg = Debug|Win32 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Debug|x86.Build.0 = Debug|Win32 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Release|x64.ActiveCfg = Release|x64 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Release|x64.Build.0 = Release|x64 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Release|x86.ActiveCfg = Release|Win32 {84657A19-CAF2-49E8-8DB3-A428C19F460D}.Release|x86.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection 
GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {14CFFA93-2ED0-43D3-A3E9-5203D4CB19BF} EndGlobalSection EndGlobal
0
coqui_public_repos/STT-models/russian/jemeyer
coqui_public_repos/STT-models/russian/jemeyer/v0.1.0/MODEL_CARD.md
# Model card for Russian STT Jump to section: - [Model details](#model-details) - [Intended use](#intended-use) - [Performance Factors](#performance-factors) - [Metrics](#metrics) - [Training data](#training-data) - [Evaluation data](#evaluation-data) - [Ethical considerations](#ethical-considerations) - [Caveats and recommendations](#caveats-and-recommendations) ## Model details - Person or organization developing model: Originally trained by [Joe Meyer](https://www.linkedin.com/in/joe-meyer-25753951/). - Model language: Russian / русский язык / `ru` - Model date: May 12, 2021 - Model type: `Speech-to-Text` - Model version: `v0.1.0` - Compatible with 🐸 STT version: `v0.9.3` - License: CC-0 - Citation details: `@techreport{russian-stt, author = {Meyer,Joe}, title = {Russian STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}} year = {2021}, month = {May}, number = {STT-CV6.1-RU-0.1} }` - Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/). ## Intended use Speech-to-Text for the [Russian Language](https://en.wikipedia.org/wiki/Russian_language) on 16kHz, mono-channel audio. ## Performance Factors Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). ## Metrics STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk. #### Transcription Accuracy The following Word Error Rates and Character Error Rates are reported for a non-official held-out test set from Common Voice 6.1 *with the use of an external language model*. The official `validated.tsv` was re-processed by [CorporaCreator](https://github.com/mozilla/corporacreator) to include all repeat sentences. |Test Corpus|WER|CER| |-----------|---|---| |Common Voice|32.3\%|12.2\%| #### Real-Time Factor Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF. Recorded average RTF on laptop CPU: `` #### Model Size `model.pbmm`: 181M `model.tflite`: 46M ### Approaches to uncertainty and variability Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio. ## Training data This model was trained on a non-official training set from Common Voice 6.1. The official `validated.tsv` was re-processed by [CorporaCreator](https://github.com/mozilla/corporacreator) to include all repeat sentences. ## Evaluation data This model was evaluated on a non-official testing set from Common Voice 6.1. The official `validated.tsv` was re-processed by [CorporaCreator](https://github.com/mozilla/corporacreator) to include all repeat sentences. ## Ethical considerations Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use. ### Demographic Bias You should assume every machine learning model has demographic bias unless proven otherwise. 
For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.

### Surveillance

Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.

## Caveats and recommendations

Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
coqui_public_repos/STT-models/english/coqui
coqui_public_repos/STT-models/english/coqui/v1.0.0-digits/MODEL_CARD.md
# Model card for English STT v1.0.0

Jump to section:

- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)

## Model details

- Person or organization developing model: Maintained by [Coqui](https://coqui.ai/).
- Model language: English / English / `en`
- Model date: October 3, 2021
- Model type: `Small vocabulary Speech-to-Text`
- Model version: `v1.0.0-digits`
- Compatible with 🐸 STT version: `v1.0.0`
- License: Apache 2.0
- Citation details: `@techreport{english-stt, author = {Coqui}, title = {English STT v1.0.0}, institution = {Coqui}, address = {\url{https://coqui.ai/models}}, year = {2021}, month = {October}, number = {STT-EN-1.0.0} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT` issues](https://github.com/coqui-ai/STT/issues), open a new discussion on [`STT` discussions](https://github.com/coqui-ai/STT/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).

## Intended use

Closed vocabulary (digits "zero" through "nine") Speech-to-Text for the [English Language](https://en.wikipedia.org/wiki/English_language) on 16kHz, mono-channel audio. This acoustic model and language model pair will only be able to recognize the words {"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", and "nine"}, which is a common use case in IVR systems.

## Performance Factors

Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

## Metrics

#### Model Size

For STT, you must always deploy an acoustic model, and you will often also want to deploy an application-specific language model. The acoustic model comes in two forms: quantized and unquantized. There is a size<->accuracy trade-off for acoustic model quantization. For this combination of acoustic model and language model, we optimize for small size.

|Model type|Vocabulary|Filename|Size|
|----------------|-----|----------------|-----|
|Acoustic model | open | `model_quantized.tflite` | 46M|
|Language model | small| `digits.scorer` |1.7K|

### Approaches to uncertainty and variability

Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.

## Training data

This model was trained on the following corpora: Common Voice 7.0 English (custom Coqui train/dev/test splits), LibriSpeech, and Multilingual LibriSpeech. In total, approximately 47,000 hours of data.

## Evaluation data

The validation ("dev") sets came from CV, LibriSpeech, and MLS. Testing accuracy is reported for MLS and LibriSpeech.

## Ethical considerations

Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.

### Demographic Bias

You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.
### Surveillance

Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. You should not assume consent to record and analyze private speech.

## Caveats and recommendations

Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/far/extract.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Extracts component FSTs from a finite-state archive.

#ifndef FST_EXTENSIONS_FAR_EXTRACT_H_
#define FST_EXTENSIONS_FAR_EXTRACT_H_

#include <memory>
#include <string>
#include <vector>

#include <fst/extensions/far/far.h>

namespace fst {

template <class Arc>
inline void FarWriteFst(const Fst<Arc> *fst, string key, string *okey,
                        int *nrep, int32 generate_filenames, int i,
                        const string &filename_prefix,
                        const string &filename_suffix) {
  if (key == *okey) {
    ++*nrep;
  } else {
    *nrep = 0;
  }
  *okey = key;
  string ofilename;
  if (generate_filenames) {
    std::ostringstream tmp;
    tmp.width(generate_filenames);
    tmp.fill('0');
    tmp << i;
    ofilename = tmp.str();
  } else {
    if (*nrep > 0) {
      std::ostringstream tmp;
      tmp << '.' << *nrep;  // Appends the repeat count, not the pointer.
      key.append(tmp.str().data(), tmp.str().size());
    }
    ofilename = key;
  }
  fst->Write(filename_prefix + ofilename + filename_suffix);
}

template <class Arc>
void FarExtract(const std::vector<string> &ifilenames,
                int32 generate_filenames, const string &keys,
                const string &key_separator, const string &range_delimiter,
                const string &filename_prefix,
                const string &filename_suffix) {
  std::unique_ptr<FarReader<Arc>> far_reader(
      FarReader<Arc>::Open(ifilenames));
  if (!far_reader) return;
  string okey;
  int nrep = 0;
  std::vector<char *> key_vector;
  // User has specified a set of FSTs to extract, where some of these may in
  // fact be ranges.
  if (!keys.empty()) {
    auto *keys_cstr = new char[keys.size() + 1];
    strcpy(keys_cstr, keys.c_str());
    SplitString(keys_cstr, key_separator.c_str(), &key_vector, true);
    int i = 0;
    for (size_t k = 0; k < key_vector.size(); ++k, ++i) {
      string key = key_vector[k];
      auto *key_cstr = new char[key.size() + 1];
      strcpy(key_cstr, key.c_str());
      std::vector<char *> range_vector;
      SplitString(key_cstr, range_delimiter.c_str(), &range_vector, false);
      if (range_vector.size() == 1) {  // Not a range
        if (!far_reader->Find(key)) {
          LOG(ERROR) << "FarExtract: Cannot find key " << key;
          return;
        }
        const auto *fst = far_reader->GetFst();
        FarWriteFst(fst, key, &okey, &nrep, generate_filenames, i,
                    filename_prefix, filename_suffix);
      } else if (range_vector.size() == 2) {  // A legal range
        string begin_key = range_vector[0];
        string end_key = range_vector[1];
        if (begin_key.empty() || end_key.empty()) {
          LOG(ERROR) << "FarExtract: Illegal range specification " << key;
          return;
        }
        if (!far_reader->Find(begin_key)) {
          LOG(ERROR) << "FarExtract: Cannot find key " << begin_key;
          return;
        }
        for (; !far_reader->Done(); far_reader->Next(), ++i) {
          const auto &ikey = far_reader->GetKey();
          if (end_key < ikey) break;
          const auto *fst = far_reader->GetFst();
          FarWriteFst(fst, ikey, &okey, &nrep, generate_filenames, i,
                      filename_prefix, filename_suffix);
        }
      } else {
        LOG(ERROR) << "FarExtract: Illegal range specification " << key;
        return;
      }
      delete[] key_cstr;
    }
    delete[] keys_cstr;
    return;
  }
  // Nothing specified, so just extracts everything.
  for (size_t i = 1; !far_reader->Done(); far_reader->Next(), ++i) {
    const auto &key = far_reader->GetKey();
    const auto *fst = far_reader->GetFst();
    FarWriteFst(fst, key, &okey, &nrep, generate_filenames, i,
                filename_prefix, filename_suffix);
  }
  return;
}

}  // namespace fst

#endif  // FST_EXTENSIONS_FAR_EXTRACT_H_
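// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original header. It shows how
// a separate source file might call FarExtract directly, mirroring what the
// `farextract` binary does. The archive name, keys, and output paths below
// are hypothetical.
// ---------------------------------------------------------------------------
#include <string>
#include <vector>

#include <fst/extensions/far/extract.h>
#include <fst/fstlib.h>

void ExtractSomeFsts() {
  const std::vector<std::string> ifilenames = {"archive.far"};
  // generate_filenames == 0 keeps the FAR keys as the output file names. The
  // key list "lexicon,rule1|rule9" requests the key "lexicon" plus the range
  // rule1..rule9, using "," to separate keys and "|" to delimit ranges.
  fst::FarExtract<fst::StdArc>(ifilenames, /*generate_filenames=*/0,
                               /*keys=*/"lexicon,rule1|rule9",
                               /*key_separator=*/",",
                               /*range_delimiter=*/"|",
                               /*filename_prefix=*/"out/",
                               /*filename_suffix=*/".fst");
}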
0
coqui_public_repos/Trainer
coqui_public_repos/Trainer/trainer/model.py
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple, Union

import torch
from coqpit import Coqpit
from torch import nn

from trainer.trainer_utils import is_apex_available

if is_apex_available():
    from apex import amp

# pylint: skip-file


class TrainerModel(ABC, nn.Module):
    """Abstract 🐸TTS class. Every new 🐸TTS model must inherit this."""

    @abstractmethod
    def forward(self, input: torch.Tensor, *args, aux_input={}, **kwargs) -> Dict:
        """Forward ... for the model mainly used in training.

        You can be flexible here and use a different number of arguments and argument names since it is intended
        to be used by `train_step()` without exposing it out of the model.

        Args:
            input (torch.Tensor): Input tensor.
            aux_input (Dict): Auxiliary model inputs like embeddings, durations or any other sorts of inputs.

        Returns:
            Dict: Model outputs. Main model output must be named as "model_outputs".
        """
        outputs_dict = {"model_outputs": None}
        ...
        return outputs_dict

    def format_batch(self, batch: Dict) -> Dict:
        """Format batch returned by the data loader before sending it to the model.

        If not implemented, model uses the batch as is.
        Can be used for data augmentation, feature extraction, etc.
        """
        return batch

    def format_batch_on_device(self, batch: Dict) -> Dict:
        """Format batch on device before sending it to the model.

        If not implemented, model uses the batch as is.
        Can be used for data augmentation, feature extraction, etc.
        """
        return batch

    def train_step(self, *args: Any, **kwargs: Any) -> Tuple[Dict, Dict]:
        """Perform a single training step. Run the model forward ... and compute losses.

        Args:
            batch (Dict): Input tensors.
            criterion (nn.Module): Loss layer designed for the model.
            optimizer_idx (int): Index of optimizer to use. 0 for the generator and 1 for the discriminator networks.

        Returns:
            Tuple[Dict, Dict]: Model outputs and computed losses.
        """
        ...
        raise NotImplementedError(" [!] `train_step()` is not implemented.")

    def train_log(self, *args: Any, **kwargs: Any) -> None:
        """Create visualizations and waveform examples for training.

        For example, here you can plot spectrograms and generate sample waveforms from these spectrograms to
        be projected onto Tensorboard.

        Args:
            batch (Dict): Model inputs used at the previous training step.
            outputs (Dict): Model outputs generated at the previous training step.
            logger (Logger): Logger instance to log training plots.
            assets (Dict): Assets to be used for logging from the trainer's closure.
            steps (int): Number of training steps taken so far.

        Returns:
            Tuple[Dict, np.ndarray]: training plots and output waveform.
        """
        ...
        raise NotImplementedError(" [!] `train_log()` is not implemented.")

    @torch.no_grad()
    def eval_step(self, *args: Any, **kwargs: Any):
        """Perform a single evaluation step. Run the model forward ... and compute losses. In most cases, you can
        call `train_step()` with no changes.

        Args:
            batch (Dict): Input tensors.
            criterion (nn.Module): Loss layer designed for the model.
            optimizer_idx (int): Index of optimizer to use. 0 for the generator and 1 for the discriminator networks.

        Returns:
            Tuple[Dict, Dict]: Model outputs and computed losses.
        """
        raise NotImplementedError(" [!] `eval_step()` is not implemented.")

    def eval_log(self, *args: Any, **kwargs: Any) -> None:
        """The same as `train_log()`"""
        ...
        raise NotImplementedError(" [!] `eval_log()` is not implemented.")

    @abstractmethod
    def get_data_loader(*args: Any, **kwargs: Any) -> torch.utils.data.DataLoader:
        """Get data loader for the model.

        Args:
            config (Coqpit): Configuration object.
assets (Dict): Additional assets to be used for data loading. is_eval (bool): If True, returns evaluation data loader. samples (Union[List[Dict], List[List]]): List of samples to be used for data loading. verbose (bool): If True, prints data loading information. num_gpus (int): Number of GPUs used for training. rank (int): Rank of the current GPU. Returns: torch.utils.data.DataLoader: Data loader for the model. """ ... raise NotImplementedError(" [!] `get_data_loader()` is not implemented.") def init_for_training(self) -> None: """Initialize model for training.""" ... def optimize(self, *args: Any, **kwargs: Any) -> Tuple[Dict, Dict, float]: """Model specific optimization step that must perform the following steps: 1. Forward pass 2. Compute loss 3. Backward pass 4. Update weights Use `self.scaled_backward()` instead of `loss.backward()` to be able to use Mixed Precision Training. Args: batch (Dict): Input tensors. trainer (Trainer): Trainer instance to be able to access the training closure. Returns: Tuple[Dict, Dict, float]: Model outputs, loss dictionary and grad_norm value. """ ... raise NotImplementedError(" [!] `optimize()` is not implemented.") def scaled_backward( self, loss: torch.Tensor, trainer: "Trainer", optimizer: "Optimizer", *args: Any, **kwargs: Any ) -> Tuple[float, bool]: """Backward pass with gradient scaling for custom `optimize` calls. Args: loss (torch.Tensor): Loss to be backpropagated. trainer (Trainer): Trainer instance to be able to access the training closure. optimizer (Optimizer): Optimizer for APEX AMP based scaled `backward` calls. """ if trainer.use_amp_scaler: if trainer.use_apex: # https://nvidia.github.io/apex/advanced.html?highlight=accumulate#backward-passes-with-multiple-optimizers with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: # model optimizer step in mixed precision mode trainer.scaler.scale(loss).backward() else: # main model optimizer step loss.backward() # def get_optimizer(self) -> Union["Optimizer", List["Optimizer"]]: # """Setup an return optimizer or optimizers.""" # ... # def get_lr(self) -> Union[float, List[float]]: # """Return learning rate(s). # Returns: # Union[float, List[float]]: Model's initial learning rates. # """ # ... # def get_scheduler(self, optimizer: torch.optim.Optimizer): # ... # def get_criterion(self): # ...
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/far/far.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Finite-State Transducer (FST) archive classes. #ifndef FST_EXTENSIONS_FAR_FAR_H_ #define FST_EXTENSIONS_FAR_FAR_H_ #include <iostream> #include <sstream> #include <fst/log.h> #include <fst/extensions/far/stlist.h> #include <fst/extensions/far/sttable.h> #include <fst/fst.h> #include <fst/vector-fst.h> #include <fstream> namespace fst { enum FarEntryType { FET_LINE, FET_FILE }; enum FarTokenType { FTT_SYMBOL, FTT_BYTE, FTT_UTF8 }; inline bool IsFst(const string &filename) { std::ifstream strm(filename, std::ios_base::in | std::ios_base::binary); if (!strm) return false; return IsFstHeader(strm, filename); } // FST archive header class class FarHeader { public: const string &ArcType() const { return arctype_; } const string &FarType() const { return fartype_; } bool Read(const string &filename) { FstHeader fsthdr; if (filename.empty()) { // Header reading unsupported on stdin. Assumes STList and StdArc. fartype_ = "stlist"; arctype_ = "standard"; return true; } else if (IsSTTable(filename)) { // Checks if STTable. ReadSTTableHeader(filename, &fsthdr); fartype_ = "sttable"; arctype_ = fsthdr.ArcType().empty() ? "unknown" : fsthdr.ArcType(); return true; } else if (IsSTList(filename)) { // Checks if STList. ReadSTListHeader(filename, &fsthdr); fartype_ = "stlist"; arctype_ = fsthdr.ArcType().empty() ? "unknown" : fsthdr.ArcType(); return true; } else if (IsFst(filename)) { // Checks if FST. std::ifstream istrm(filename, std::ios_base::in | std::ios_base::binary); fsthdr.Read(istrm, filename); fartype_ = "fst"; arctype_ = fsthdr.ArcType().empty() ? "unknown" : fsthdr.ArcType(); return true; } return false; } private: string fartype_; string arctype_; }; enum FarType { FAR_DEFAULT = 0, FAR_STTABLE = 1, FAR_STLIST = 2, FAR_FST = 3, }; // This class creates an archive of FSTs. template <class A> class FarWriter { public: using Arc = A; // Creates a new (empty) FST archive; returns null on error. static FarWriter *Create(const string &filename, FarType type = FAR_DEFAULT); // Adds an FST to the end of an archive. Keys must be non-empty and // in lexicographic order. FSTs must have a suitable write method. virtual void Add(const string &key, const Fst<Arc> &fst) = 0; virtual FarType Type() const = 0; virtual bool Error() const = 0; virtual ~FarWriter() {} protected: FarWriter() {} }; // This class iterates through an existing archive of FSTs. template <class A> class FarReader { public: using Arc = A; // Opens an existing FST archive in a single file; returns null on error. // Sets current position to the beginning of the achive. static FarReader *Open(const string &filename); // Opens an existing FST archive in multiple files; returns null on error. // Sets current position to the beginning of the achive. static FarReader *Open(const std::vector<string> &filenames); // Resets current position to beginning of archive. virtual void Reset() = 0; // Sets current position to first entry >= key. Returns true if a match. virtual bool Find(const string &key) = 0; // Current position at end of archive? virtual bool Done() const = 0; // Move current position to next FST. virtual void Next() = 0; // Returns key at the current position. This reference is invalidated if // the current position in the archive is changed. virtual const string &GetKey() const = 0; // Returns pointer to FST at the current position. This is invalidated if // the current position in the archive is changed. 
virtual const Fst<Arc> *GetFst() const = 0; virtual FarType Type() const = 0; virtual bool Error() const = 0; virtual ~FarReader() {} protected: FarReader() {} }; template <class Arc> class FstWriter { public: void operator()(std::ostream &strm, const Fst<Arc> &fst) const { fst.Write(strm, FstWriteOptions()); } }; template <class A> class STTableFarWriter : public FarWriter<A> { public: using Arc = A; static STTableFarWriter *Create(const string &filename) { auto *writer = STTableWriter<Fst<Arc>, FstWriter<Arc>>::Create(filename); return new STTableFarWriter(writer); } void Add(const string &key, const Fst<Arc> &fst) final { writer_->Add(key, fst); } FarType Type() const final { return FAR_STTABLE; } bool Error() const final { return writer_->Error(); } private: explicit STTableFarWriter(STTableWriter<Fst<Arc>, FstWriter<Arc>> *writer) : writer_(writer) {} std::unique_ptr<STTableWriter<Fst<Arc>, FstWriter<Arc>>> writer_; }; template <class A> class STListFarWriter : public FarWriter<A> { public: using Arc = A; static STListFarWriter *Create(const string &filename) { auto *writer = STListWriter<Fst<Arc>, FstWriter<Arc>>::Create(filename); return new STListFarWriter(writer); } void Add(const string &key, const Fst<Arc> &fst) final { writer_->Add(key, fst); } constexpr FarType Type() const final { return FAR_STLIST; } bool Error() const final { return writer_->Error(); } private: explicit STListFarWriter(STListWriter<Fst<Arc>, FstWriter<Arc>> *writer) : writer_(writer) {} std::unique_ptr<STListWriter<Fst<Arc>, FstWriter<Arc>>> writer_; }; template <class A> class FstFarWriter : public FarWriter<A> { public: using Arc = A; explicit FstFarWriter(const string &filename) : filename_(filename), error_(false), written_(false) {} static FstFarWriter *Create(const string &filename) { return new FstFarWriter(filename); } void Add(const string &key, const Fst<A> &fst) final { if (written_) { LOG(WARNING) << "FstFarWriter::Add: only one FST supported," << " subsequent entries discarded."; } else { error_ = !fst.Write(filename_); written_ = true; } } constexpr FarType Type() const final { return FAR_FST; } bool Error() const final { return error_; } ~FstFarWriter() final {} private: string filename_; bool error_; bool written_; }; template <class Arc> FarWriter<Arc> *FarWriter<Arc>::Create(const string &filename, FarType type) { switch (type) { case FAR_DEFAULT: if (filename.empty()) return STListFarWriter<Arc>::Create(filename); case FAR_STTABLE: return STTableFarWriter<Arc>::Create(filename); case FAR_STLIST: return STListFarWriter<Arc>::Create(filename); case FAR_FST: return FstFarWriter<Arc>::Create(filename); default: LOG(ERROR) << "FarWriter::Create: Unknown FAR type"; return nullptr; } } template <class Arc> class FstReader { public: Fst<Arc> *operator()(std::istream &strm) const { return Fst<Arc>::Read(strm, FstReadOptions()); } }; template <class A> class STTableFarReader : public FarReader<A> { public: using Arc = A; static STTableFarReader *Open(const string &filename) { auto *reader = STTableReader<Fst<Arc>, FstReader<Arc>>::Open(filename); if (!reader || reader->Error()) return nullptr; return new STTableFarReader(reader); } static STTableFarReader *Open(const std::vector<string> &filenames) { auto *reader = STTableReader<Fst<Arc>, FstReader<Arc>>::Open(filenames); if (!reader || reader->Error()) return nullptr; return new STTableFarReader(reader); } void Reset() final { reader_->Reset(); } bool Find(const string &key) final { return reader_->Find(key); } bool Done() const final { return 
reader_->Done(); } void Next() final { return reader_->Next(); } const string &GetKey() const final { return reader_->GetKey(); } const Fst<Arc> *GetFst() const final { return reader_->GetEntry(); } constexpr FarType Type() const final { return FAR_STTABLE; } bool Error() const final { return reader_->Error(); } private: explicit STTableFarReader(STTableReader<Fst<Arc>, FstReader<Arc>> *reader) : reader_(reader) {} std::unique_ptr<STTableReader<Fst<Arc>, FstReader<Arc>>> reader_; }; template <class A> class STListFarReader : public FarReader<A> { public: using Arc = A; static STListFarReader *Open(const string &filename) { auto *reader = STListReader<Fst<Arc>, FstReader<Arc>>::Open(filename); if (!reader || reader->Error()) return nullptr; return new STListFarReader(reader); } static STListFarReader *Open(const std::vector<string> &filenames) { auto *reader = STListReader<Fst<Arc>, FstReader<Arc>>::Open(filenames); if (!reader || reader->Error()) return nullptr; return new STListFarReader(reader); } void Reset() final { reader_->Reset(); } bool Find(const string &key) final { return reader_->Find(key); } bool Done() const final { return reader_->Done(); } void Next() final { return reader_->Next(); } const string &GetKey() const final { return reader_->GetKey(); } const Fst<Arc> *GetFst() const final { return reader_->GetEntry(); } constexpr FarType Type() const final { return FAR_STLIST; } bool Error() const final { return reader_->Error(); } private: explicit STListFarReader(STListReader<Fst<Arc>, FstReader<Arc>> *reader) : reader_(reader) {} std::unique_ptr<STListReader<Fst<Arc>, FstReader<Arc>>> reader_; }; template <class A> class FstFarReader : public FarReader<A> { public: using Arc = A; static FstFarReader *Open(const string &filename) { std::vector<string> filenames; filenames.push_back(filename); return new FstFarReader<Arc>(filenames); } static FstFarReader *Open(const std::vector<string> &filenames) { return new FstFarReader<Arc>(filenames); } explicit FstFarReader(const std::vector<string> &filenames) : keys_(filenames), has_stdin_(false), pos_(0), error_(false) { std::sort(keys_.begin(), keys_.end()); streams_.resize(keys_.size(), 0); for (size_t i = 0; i < keys_.size(); ++i) { if (keys_[i].empty()) { if (!has_stdin_) { streams_[i] = &std::cin; // sources_[i] = "stdin"; has_stdin_ = true; } else { FSTERROR() << "FstFarReader::FstFarReader: standard input should " "only appear once in the input file list"; error_ = true; return; } } else { streams_[i] = new std::ifstream( keys_[i], std::ios_base::in | std::ios_base::binary); } } if (pos_ >= keys_.size()) return; ReadFst(); } void Reset() final { if (has_stdin_) { FSTERROR() << "FstFarReader::Reset: Operation not supported on standard input"; error_ = true; return; } pos_ = 0; ReadFst(); } bool Find(const string &key) final { if (has_stdin_) { FSTERROR() << "FstFarReader::Find: Operation not supported on standard input"; error_ = true; return false; } pos_ = 0; // TODO ReadFst(); return true; } bool Done() const final { return error_ || pos_ >= keys_.size(); } void Next() final { ++pos_; ReadFst(); } const string &GetKey() const final { return keys_[pos_]; } const Fst<Arc> *GetFst() const final { return fst_.get(); } constexpr FarType Type() const final { return FAR_FST; } bool Error() const final { return error_; } ~FstFarReader() final { for (size_t i = 0; i < keys_.size(); ++i) { if (streams_[i] != &std::cin) { delete streams_[i]; } } } private: void ReadFst() { fst_.reset(); if (pos_ >= keys_.size()) return; 
streams_[pos_]->seekg(0); fst_.reset(Fst<Arc>::Read(*streams_[pos_], FstReadOptions())); if (!fst_) { FSTERROR() << "FstFarReader: Error reading Fst from: " << keys_[pos_]; error_ = true; } } std::vector<string> keys_; std::vector<std::istream *> streams_; bool has_stdin_; size_t pos_; mutable std::unique_ptr<Fst<Arc>> fst_; mutable bool error_; }; template <class Arc> FarReader<Arc> *FarReader<Arc>::Open(const string &filename) { if (filename.empty()) return STListFarReader<Arc>::Open(filename); else if (IsSTTable(filename)) return STTableFarReader<Arc>::Open(filename); else if (IsSTList(filename)) return STListFarReader<Arc>::Open(filename); else if (IsFst(filename)) return FstFarReader<Arc>::Open(filename); return nullptr; } template <class Arc> FarReader<Arc> *FarReader<Arc>::Open(const std::vector<string> &filenames) { if (!filenames.empty() && filenames[0].empty()) return STListFarReader<Arc>::Open(filenames); else if (!filenames.empty() && IsSTTable(filenames[0])) return STTableFarReader<Arc>::Open(filenames); else if (!filenames.empty() && IsSTList(filenames[0])) return STListFarReader<Arc>::Open(filenames); else if (!filenames.empty() && IsFst(filenames[0])) return FstFarReader<Arc>::Open(filenames); return nullptr; } } // namespace fst #endif // FST_EXTENSIONS_FAR_FAR_H_
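// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original header. It writes
// two FSTs into an STTable-format FAR and reads them back with the classes
// declared above. The file name and keys are hypothetical; as documented for
// FarWriter::Add, keys must be added in lexicographic order.
// ---------------------------------------------------------------------------
#include <iostream>
#include <memory>

#include <fst/extensions/far/far.h>
#include <fst/fstlib.h>

void FarRoundTrip(const fst::StdVectorFst &a, const fst::StdVectorFst &b) {
  using fst::FarReader;
  using fst::FarWriter;
  using fst::StdArc;
  std::unique_ptr<FarWriter<StdArc>> writer(
      FarWriter<StdArc>::Create("pair.far", fst::FAR_STTABLE));
  if (!writer) return;
  writer->Add("alpha", a);
  writer->Add("beta", b);
  writer.reset();  // Destroying the writer finalizes the archive on disk.
  std::unique_ptr<FarReader<StdArc>> reader(
      FarReader<StdArc>::Open("pair.far"));
  if (!reader) return;
  for (; !reader->Done(); reader->Next()) {
    std::cout << reader->GetKey() << ": "
              << fst::CountStates(*reader->GetFst()) << " states" << std::endl;
  }
}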
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstsynchronize-main.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Synchronizes an FST. #include <cstring> #include <memory> #include <string> #include <fst/flags.h> #include <fst/script/synchronize.h> int fstsynchronize_main(int argc, char **argv) { namespace s = fst::script; using fst::script::FstClass; using fst::script::VectorFstClass; string usage = "Synchronizes an FST.\n\n Usage: "; usage += argv[0]; usage += " [in.fst [out.fst]]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 3) { ShowUsage(); return 1; } const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : ""; const string out_name = argc > 2 ? argv[2] : ""; std::unique_ptr<FstClass> ifst(FstClass::Read(in_name)); if (!ifst) return 1; VectorFstClass ofst(ifst->ArcType()); s::Synchronize(*ifst, &ofst); return !ofst.Write(out_name); }
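// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the original source. It performs
// the same synchronization programmatically through the arc-type-agnostic
// script API used by the main above; the file names are hypothetical.
// ---------------------------------------------------------------------------
#include <memory>

#include <fst/script/fst-class.h>
#include <fst/script/synchronize.h>

bool SynchronizeOneFile() {
  namespace s = fst::script;
  std::unique_ptr<s::FstClass> ifst(s::FstClass::Read("in.fst"));
  if (!ifst) return false;
  s::VectorFstClass ofst(ifst->ArcType());
  s::Synchronize(*ifst, &ofst);
  return ofst.Write("out.fst");
}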
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/arc-map.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Class to map over/transform arcs e.g., change semirings or // implement project/invert. Consider using when operation does // not change the number of arcs (except possibly superfinal arcs). #ifndef FST_ARC_MAP_H_ #define FST_ARC_MAP_H_ #include <string> #include <unordered_map> #include <utility> #include <fst/log.h> #include <fst/cache.h> #include <fst/mutable-fst.h> namespace fst { // Determines how final weights are mapped. enum MapFinalAction { // A final weight is mapped into a final weight. An error is raised if this // is not possible. MAP_NO_SUPERFINAL, // A final weight is mapped to an arc to the superfinal state when the result // cannot be represented as a final weight. The superfinal state will be // added only if it is needed. MAP_ALLOW_SUPERFINAL, // A final weight is mapped to an arc to the superfinal state unless the // result can be represented as a final weight of weight Zero(). The // superfinal state is always added (if the input is not the empty FST). MAP_REQUIRE_SUPERFINAL }; // Determines how symbol tables are mapped. enum MapSymbolsAction { // Symbols should be cleared in the result by the map. MAP_CLEAR_SYMBOLS, // Symbols should be copied from the input FST by the map. MAP_COPY_SYMBOLS, // Symbols should not be modified in the result by the map itself. // (They may set by the mapper). MAP_NOOP_SYMBOLS }; // The ArcMapper interfaces defines how arcs and final weights are mapped. // This is useful for implementing operations that do not change the number of // arcs (expect possibly superfinal arcs). // // template <class A, class B> // class ArcMapper { // public: // using FromArc = A; // using ToArc = B; // // // Maps an arc type FromArc to arc type ToArc. // ToArc operator()(const FromArc &arc); // // // Specifies final action the mapper requires (see above). // // The mapper will be passed final weights as arcs of the form // // Arc(0, 0, weight, kNoStateId). // MapFinalAction FinalAction() const; // // // Specifies input symbol table action the mapper requires (see above). // MapSymbolsAction InputSymbolsAction() const; // // // Specifies output symbol table action the mapper requires (see above). // MapSymbolsAction OutputSymbolsAction() const; // // // This specifies the known properties of an FST mapped by this mapper. It // takes as argument the input FSTs's known properties. // uint64 Properties(uint64 props) const; // }; // // The ArcMap functions and classes below will use the FinalAction() // method of the mapper to determine how to treat final weights, e.g., whether // to add a superfinal state. They will use the Properties() method to set the // result FST properties. // // We include a various map versions below. One dimension of variation is // whether the mapping mutates its input, writes to a new result FST, or is an // on-the-fly FST. Another dimension is how we pass the mapper. We allow passing // the mapper by pointer for cases that we need to change the state of the // user's mapper. This is the case with the EncodeMapper, which is reused // during decoding. We also include map versions that pass the mapper by value // or const reference when this suffices. // Maps an arc type A using a mapper function object C, passed // by pointer. This version modifies its Fst input. 
template <class A, class C> void ArcMap(MutableFst<A> *fst, C *mapper) { using FromArc = A; using ToArc = A; using StateId = typename FromArc::StateId; using Weight = typename FromArc::Weight; if (mapper->InputSymbolsAction() == MAP_CLEAR_SYMBOLS) { fst->SetInputSymbols(nullptr); } if (mapper->OutputSymbolsAction() == MAP_CLEAR_SYMBOLS) { fst->SetOutputSymbols(nullptr); } if (fst->Start() == kNoStateId) return; const auto props = fst->Properties(kFstProperties, false); const auto final_action = mapper->FinalAction(); auto superfinal = kNoStateId; if (final_action == MAP_REQUIRE_SUPERFINAL) { superfinal = fst->AddState(); fst->SetFinal(superfinal, Weight::One()); } for (StateIterator<MutableFst<FromArc>> siter(*fst); !siter.Done(); siter.Next()) { const auto state = siter.Value(); for (MutableArcIterator<MutableFst<FromArc>> aiter(fst, state); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); aiter.SetValue((*mapper)(arc)); } switch (final_action) { case MAP_NO_SUPERFINAL: default: { const FromArc arc(0, 0, fst->Final(state), kNoStateId); const auto final_arc = (*mapper)(arc); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { FSTERROR() << "ArcMap: Non-zero arc labels for superfinal arc"; fst->SetProperties(kError, kError); } fst->SetFinal(state, final_arc.weight); break; } case MAP_ALLOW_SUPERFINAL: { if (state != superfinal) { const FromArc arc(0, 0, fst->Final(state), kNoStateId); auto final_arc = (*mapper)(arc); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { // Add a superfinal state if not already done. if (superfinal == kNoStateId) { superfinal = fst->AddState(); fst->SetFinal(superfinal, Weight::One()); } final_arc.nextstate = superfinal; fst->AddArc(state, final_arc); fst->SetFinal(state, Weight::Zero()); } else { fst->SetFinal(state, final_arc.weight); } } break; } case MAP_REQUIRE_SUPERFINAL: { if (state != superfinal) { const FromArc arc(0, 0, fst->Final(state), kNoStateId); const auto final_arc = (*mapper)(arc); if (final_arc.ilabel != 0 || final_arc.olabel != 0 || final_arc.weight != Weight::Zero()) { fst->AddArc(state, ToArc(final_arc.ilabel, final_arc.olabel, final_arc.weight, superfinal)); } fst->SetFinal(state, Weight::Zero()); } break; } } } fst->SetProperties(mapper->Properties(props), kFstProperties); } // Maps an arc type A using a mapper function object C, passed by value. This // version modifies its FST input. template <class A, class C> void ArcMap(MutableFst<A> *fst, C mapper) { ArcMap(fst, &mapper); } // Maps an arc type A to an arc type B using mapper function object C, // passed by pointer. This version writes the mapped input FST to an // output MutableFst. 
template <class A, class B, class C> void ArcMap(const Fst<A> &ifst, MutableFst<B> *ofst, C *mapper) { using FromArc = A; using StateId = typename FromArc::StateId; using Weight = typename FromArc::Weight; ofst->DeleteStates(); if (mapper->InputSymbolsAction() == MAP_COPY_SYMBOLS) { ofst->SetInputSymbols(ifst.InputSymbols()); } else if (mapper->InputSymbolsAction() == MAP_CLEAR_SYMBOLS) { ofst->SetInputSymbols(nullptr); } if (mapper->OutputSymbolsAction() == MAP_COPY_SYMBOLS) { ofst->SetOutputSymbols(ifst.OutputSymbols()); } else if (mapper->OutputSymbolsAction() == MAP_CLEAR_SYMBOLS) { ofst->SetOutputSymbols(nullptr); } const auto iprops = ifst.Properties(kCopyProperties, false); if (ifst.Start() == kNoStateId) { if (iprops & kError) ofst->SetProperties(kError, kError); return; } const auto final_action = mapper->FinalAction(); if (ifst.Properties(kExpanded, false)) { ofst->ReserveStates( CountStates(ifst) + final_action == MAP_NO_SUPERFINAL ? 0 : 1); } // Adds all states. for (StateIterator<Fst<A>> siter(ifst); !siter.Done(); siter.Next()) { ofst->AddState(); } StateId superfinal = kNoStateId; if (final_action == MAP_REQUIRE_SUPERFINAL) { superfinal = ofst->AddState(); ofst->SetFinal(superfinal, B::Weight::One()); } for (StateIterator<Fst<A>> siter(ifst); !siter.Done(); siter.Next()) { StateId s = siter.Value(); if (s == ifst.Start()) ofst->SetStart(s); ofst->ReserveArcs(s, ifst.NumArcs(s)); for (ArcIterator<Fst<A>> aiter(ifst, s); !aiter.Done(); aiter.Next()) { ofst->AddArc(s, (*mapper)(aiter.Value())); } switch (final_action) { case MAP_NO_SUPERFINAL: default: { B final_arc = (*mapper)(A(0, 0, ifst.Final(s), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { FSTERROR() << "ArcMap: Non-zero arc labels for superfinal arc"; ofst->SetProperties(kError, kError); } ofst->SetFinal(s, final_arc.weight); break; } case MAP_ALLOW_SUPERFINAL: { B final_arc = (*mapper)(A(0, 0, ifst.Final(s), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { // Add a superfinal state if not already done. if (superfinal == kNoStateId) { superfinal = ofst->AddState(); ofst->SetFinal(superfinal, B::Weight::One()); } final_arc.nextstate = superfinal; ofst->AddArc(s, final_arc); ofst->SetFinal(s, B::Weight::Zero()); } else { ofst->SetFinal(s, final_arc.weight); } break; } case MAP_REQUIRE_SUPERFINAL: { B final_arc = (*mapper)(A(0, 0, ifst.Final(s), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0 || final_arc.weight != B::Weight::Zero()) { ofst->AddArc(s, B(final_arc.ilabel, final_arc.olabel, final_arc.weight, superfinal)); } ofst->SetFinal(s, B::Weight::Zero()); break; } } } const auto oprops = ofst->Properties(kFstProperties, false); ofst->SetProperties(mapper->Properties(iprops) | oprops, kFstProperties); } // Maps an arc type A to an arc type B using mapper function // object C, passed by value. This version writes the mapped input // Fst to an output MutableFst. template <class A, class B, class C> void ArcMap(const Fst<A> &ifst, MutableFst<B> *ofst, C mapper) { ArcMap(ifst, ofst, &mapper); } struct ArcMapFstOptions : public CacheOptions { // ArcMapFst default caching behaviour is to do no caching. Most mappers are // cheap and therefore we save memory by not doing caching. ArcMapFstOptions() : CacheOptions(true, 0) {} explicit ArcMapFstOptions(const CacheOptions &opts) : CacheOptions(opts) {} }; template <class A, class B, class C> class ArcMapFst; namespace internal { // Implementation of delayed ArcMapFst. 
template <class A, class B, class C> class ArcMapFstImpl : public CacheImpl<B> { public: using Arc = B; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using FstImpl<B>::SetType; using FstImpl<B>::SetProperties; using FstImpl<B>::SetInputSymbols; using FstImpl<B>::SetOutputSymbols; using CacheImpl<B>::PushArc; using CacheImpl<B>::HasArcs; using CacheImpl<B>::HasFinal; using CacheImpl<B>::HasStart; using CacheImpl<B>::SetArcs; using CacheImpl<B>::SetFinal; using CacheImpl<B>::SetStart; friend class StateIterator<ArcMapFst<A, B, C>>; ArcMapFstImpl(const Fst<A> &fst, const C &mapper, const ArcMapFstOptions &opts) : CacheImpl<B>(opts), fst_(fst.Copy()), mapper_(new C(mapper)), own_mapper_(true), superfinal_(kNoStateId), nstates_(0) { Init(); } ArcMapFstImpl(const Fst<A> &fst, C *mapper, const ArcMapFstOptions &opts) : CacheImpl<B>(opts), fst_(fst.Copy()), mapper_(mapper), own_mapper_(false), superfinal_(kNoStateId), nstates_(0) { Init(); } ArcMapFstImpl(const ArcMapFstImpl<A, B, C> &impl) : CacheImpl<B>(impl), fst_(impl.fst_->Copy(true)), mapper_(new C(*impl.mapper_)), own_mapper_(true), superfinal_(kNoStateId), nstates_(0) { Init(); } ~ArcMapFstImpl() override { if (own_mapper_) delete mapper_; } StateId Start() { if (!HasStart()) SetStart(FindOState(fst_->Start())); return CacheImpl<B>::Start(); } Weight Final(StateId s) { if (!HasFinal(s)) { switch (final_action_) { case MAP_NO_SUPERFINAL: default: { const auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { FSTERROR() << "ArcMapFst: Non-zero arc labels for superfinal arc"; SetProperties(kError, kError); } SetFinal(s, final_arc.weight); break; } case MAP_ALLOW_SUPERFINAL: { if (s == superfinal_) { SetFinal(s, Weight::One()); } else { const auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel == 0 && final_arc.olabel == 0) { SetFinal(s, final_arc.weight); } else { SetFinal(s, Weight::Zero()); } } break; } case MAP_REQUIRE_SUPERFINAL: { SetFinal(s, s == superfinal_ ? Weight::One() : Weight::Zero()); break; } } } return CacheImpl<B>::Final(s); } size_t NumArcs(StateId s) { if (!HasArcs(s)) Expand(s); return CacheImpl<B>::NumArcs(s); } size_t NumInputEpsilons(StateId s) { if (!HasArcs(s)) Expand(s); return CacheImpl<B>::NumInputEpsilons(s); } size_t NumOutputEpsilons(StateId s) { if (!HasArcs(s)) Expand(s); return CacheImpl<B>::NumOutputEpsilons(s); } uint64 Properties() const override { return Properties(kFstProperties); } // Sets error if found, and returns other FST impl properties. uint64 Properties(uint64 mask) const override { if ((mask & kError) && (fst_->Properties(kError, false) || (mapper_->Properties(0) & kError))) { SetProperties(kError, kError); } return FstImpl<Arc>::Properties(mask); } void InitArcIterator(StateId s, ArcIteratorData<B> *data) { if (!HasArcs(s)) Expand(s); CacheImpl<B>::InitArcIterator(s, data); } void Expand(StateId s) { // Add exiting arcs. if (s == superfinal_) { SetArcs(s); return; } for (ArcIterator<Fst<A>> aiter(*fst_, FindIState(s)); !aiter.Done(); aiter.Next()) { auto aarc = aiter.Value(); aarc.nextstate = FindOState(aarc.nextstate); const auto &barc = (*mapper_)(aarc); PushArc(s, barc); } // Check for superfinal arcs. 
if (!HasFinal(s) || Final(s) == Weight::Zero()) { switch (final_action_) { case MAP_NO_SUPERFINAL: default: break; case MAP_ALLOW_SUPERFINAL: { auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) { if (superfinal_ == kNoStateId) superfinal_ = nstates_++; final_arc.nextstate = superfinal_; PushArc(s, final_arc); } break; } case MAP_REQUIRE_SUPERFINAL: { const auto final_arc = (*mapper_)(A(0, 0, fst_->Final(FindIState(s)), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0 || final_arc.weight != B::Weight::Zero()) { PushArc(s, B(final_arc.ilabel, final_arc.olabel, final_arc.weight, superfinal_)); } break; } } } SetArcs(s); } private: void Init() { SetType("map"); if (mapper_->InputSymbolsAction() == MAP_COPY_SYMBOLS) { SetInputSymbols(fst_->InputSymbols()); } else if (mapper_->InputSymbolsAction() == MAP_CLEAR_SYMBOLS) { SetInputSymbols(nullptr); } if (mapper_->OutputSymbolsAction() == MAP_COPY_SYMBOLS) { SetOutputSymbols(fst_->OutputSymbols()); } else if (mapper_->OutputSymbolsAction() == MAP_CLEAR_SYMBOLS) { SetOutputSymbols(nullptr); } if (fst_->Start() == kNoStateId) { final_action_ = MAP_NO_SUPERFINAL; SetProperties(kNullProperties); } else { final_action_ = mapper_->FinalAction(); uint64 props = fst_->Properties(kCopyProperties, false); SetProperties(mapper_->Properties(props)); if (final_action_ == MAP_REQUIRE_SUPERFINAL) superfinal_ = 0; } } // Maps from output state to input state. StateId FindIState(StateId s) { if (superfinal_ == kNoStateId || s < superfinal_) { return s; } else { return s - 1; } } // Maps from input state to output state. StateId FindOState(StateId is) { auto os = is; if (!(superfinal_ == kNoStateId || is < superfinal_)) ++os; if (os >= nstates_) nstates_ = os + 1; return os; } std::unique_ptr<const Fst<A>> fst_; C *mapper_; const bool own_mapper_; MapFinalAction final_action_; StateId superfinal_; StateId nstates_; }; } // namespace internal // Maps an arc type A to an arc type B using Mapper function object // C. This version is a delayed FST. template <class A, class B, class C> class ArcMapFst : public ImplToFst<internal::ArcMapFstImpl<A, B, C>> { public: using Arc = B; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using Store = DefaultCacheStore<B>; using State = typename Store::State; using Impl = internal::ArcMapFstImpl<A, B, C>; friend class ArcIterator<ArcMapFst<A, B, C>>; friend class StateIterator<ArcMapFst<A, B, C>>; ArcMapFst(const Fst<A> &fst, const C &mapper, const ArcMapFstOptions &opts) : ImplToFst<Impl>(std::make_shared<Impl>(fst, mapper, opts)) {} ArcMapFst(const Fst<A> &fst, C *mapper, const ArcMapFstOptions &opts) : ImplToFst<Impl>(std::make_shared<Impl>(fst, mapper, opts)) {} ArcMapFst(const Fst<A> &fst, const C &mapper) : ImplToFst<Impl>( std::make_shared<Impl>(fst, mapper, ArcMapFstOptions())) {} ArcMapFst(const Fst<A> &fst, C *mapper) : ImplToFst<Impl>( std::make_shared<Impl>(fst, mapper, ArcMapFstOptions())) {} // See Fst<>::Copy() for doc. ArcMapFst(const ArcMapFst<A, B, C> &fst, bool safe = false) : ImplToFst<Impl>(fst, safe) {} // Get a copy of this ArcMapFst. See Fst<>::Copy() for further doc. 
ArcMapFst<A, B, C> *Copy(bool safe = false) const override { return new ArcMapFst<A, B, C>(*this, safe); } inline void InitStateIterator(StateIteratorData<B> *data) const override; void InitArcIterator(StateId s, ArcIteratorData<B> *data) const override { GetMutableImpl()->InitArcIterator(s, data); } protected: using ImplToFst<Impl>::GetImpl; using ImplToFst<Impl>::GetMutableImpl; private: ArcMapFst &operator=(const ArcMapFst &) = delete; }; // Specialization for ArcMapFst. // // This may be derived from. template <class A, class B, class C> class StateIterator<ArcMapFst<A, B, C>> : public StateIteratorBase<B> { public: using StateId = typename B::StateId; explicit StateIterator(const ArcMapFst<A, B, C> &fst) : impl_(fst.GetImpl()), siter_(*impl_->fst_), s_(0), superfinal_(impl_->final_action_ == MAP_REQUIRE_SUPERFINAL) { CheckSuperfinal(); } bool Done() const final { return siter_.Done() && !superfinal_; } StateId Value() const final { return s_; } void Next() final { ++s_; if (!siter_.Done()) { siter_.Next(); CheckSuperfinal(); } else if (superfinal_) { superfinal_ = false; } } void Reset() final { s_ = 0; siter_.Reset(); superfinal_ = impl_->final_action_ == MAP_REQUIRE_SUPERFINAL; CheckSuperfinal(); } private: void CheckSuperfinal() { if (impl_->final_action_ != MAP_ALLOW_SUPERFINAL || superfinal_) return; if (!siter_.Done()) { const auto final_arc = (*impl_->mapper_)(A(0, 0, impl_->fst_->Final(s_), kNoStateId)); if (final_arc.ilabel != 0 || final_arc.olabel != 0) superfinal_ = true; } } const internal::ArcMapFstImpl<A, B, C> *impl_; StateIterator<Fst<A>> siter_; StateId s_; bool superfinal_; // True if there is a superfinal state and not done. }; // Specialization for ArcMapFst. template <class A, class B, class C> class ArcIterator<ArcMapFst<A, B, C>> : public CacheArcIterator<ArcMapFst<A, B, C>> { public: using StateId = typename A::StateId; ArcIterator(const ArcMapFst<A, B, C> &fst, StateId s) : CacheArcIterator<ArcMapFst<A, B, C>>(fst.GetMutableImpl(), s) { if (!fst.GetImpl()->HasArcs(s)) fst.GetMutableImpl()->Expand(s); } }; template <class A, class B, class C> inline void ArcMapFst<A, B, C>::InitStateIterator( StateIteratorData<B> *data) const { data->base = new StateIterator<ArcMapFst<A, B, C>>(*this); } // Utility Mappers. // Mapper that returns its input. template <class A> class IdentityArcMapper { public: using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return arc; } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props; } }; // Mapper that converts all input symbols to epsilon. template <class A> class InputEpsilonMapper { public: using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return ToArc(0, arc.olabel, arc.weight, arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return (props & kSetArcProperties) | kIEpsilons; } }; // Mapper that converts all output symbols to epsilon. 
template <class A> class OutputEpsilonMapper { public: using FromArc = A; using ToArc = A; ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, 0, arc.weight, arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64 Properties(uint64 props) const { return (props & kSetArcProperties) | kOEpsilons; } }; // Mapper that returns its input with final states redirected to a single // super-final state. template <class A> class SuperFinalMapper { public: using FromArc = A; using ToArc = A; using Label = typename FromArc::Label; using Weight = typename FromArc::Weight;; // Arg allows setting super-final label. explicit SuperFinalMapper(Label final_label = 0) : final_label_(final_label) {} ToArc operator()(const FromArc &arc) const { // Super-final arc. if (arc.nextstate == kNoStateId && arc.weight != Weight::Zero()) { return ToArc(final_label_, final_label_, arc.weight, kNoStateId); } else { return arc; } } constexpr MapFinalAction FinalAction() const { return MAP_REQUIRE_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { if (final_label_ == 0) { return props & kAddSuperFinalProperties; } else { return props & kAddSuperFinalProperties & kILabelInvariantProperties & kOLabelInvariantProperties; } } private: Label final_label_; }; // Mapper that leaves labels and nextstate unchanged and constructs a new weight // from the underlying value of the arc weight. If no weight converter is // explictly specified, requires that there is a WeightConvert class // specialization that converts the weights. template <class A, class B, class C = WeightConvert<typename A::Weight, typename B::Weight>> class WeightConvertMapper { public: using FromArc = A; using ToArc = B; using Converter = C; using FromWeight = typename FromArc::Weight; using ToWeight = typename ToArc::Weight; explicit WeightConvertMapper(const Converter &c = Converter()) : convert_weight_(c) {} ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, convert_weight_(arc.weight), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props; } private: Converter convert_weight_; }; // Non-precision-changing weight conversions; consider using more efficient // Cast method instead. using StdToLogMapper = WeightConvertMapper<StdArc, LogArc>; using LogToStdMapper = WeightConvertMapper<LogArc, StdArc>; // Precision-changing weight conversions. using StdToLog64Mapper = WeightConvertMapper<StdArc, Log64Arc>; using LogToLog64Mapper = WeightConvertMapper<LogArc, Log64Arc>; using Log64ToStdMapper = WeightConvertMapper<Log64Arc, StdArc>; using Log64ToLogMapper = WeightConvertMapper<Log64Arc, LogArc>; // Mapper from A to GallicArc<A>. 
template <class A, GallicType G = GALLIC_LEFT> class ToGallicMapper { public: using FromArc = A; using ToArc = GallicArc<A, G>; using SW = StringWeight<typename A::Label, GallicStringType(G)>; using AW = typename FromArc::Weight; using GW = typename ToArc::Weight; ToArc operator()(const FromArc &arc) const { // Super-final arc. if (arc.nextstate == kNoStateId && arc.weight != AW::Zero()) { return ToArc(0, 0, GW(SW::One(), arc.weight), kNoStateId); // Super-non-final arc. } else if (arc.nextstate == kNoStateId) { return ToArc(0, 0, GW::Zero(), kNoStateId); // Epsilon label. } else if (arc.olabel == 0) { return ToArc(arc.ilabel, arc.ilabel, GW(SW::One(), arc.weight), arc.nextstate); // Regular label. } else { return ToArc(arc.ilabel, arc.ilabel, GW(SW(arc.olabel), arc.weight), arc.nextstate); } } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64 Properties(uint64 props) const { return ProjectProperties(props, true) & kWeightInvariantProperties; } }; // Mapper from GallicArc<A> to A. template <class A, GallicType G = GALLIC_LEFT> class FromGallicMapper { public: using FromArc = GallicArc<A, G>; using ToArc = A; using Label = typename ToArc::Label; using AW = typename ToArc::Weight; using GW = typename FromArc::Weight; explicit FromGallicMapper(Label superfinal_label = 0) : superfinal_label_(superfinal_label), error_(false) {} ToArc operator()(const FromArc &arc) const { // 'Super-non-final' arc. if (arc.nextstate == kNoStateId && arc.weight == GW::Zero()) { return A(arc.ilabel, 0, AW::Zero(), kNoStateId); } Label l = kNoLabel; AW weight; if (!Extract(arc.weight, &weight, &l) || arc.ilabel != arc.olabel) { FSTERROR() << "FromGallicMapper: Unrepresentable weight: " << arc.weight << " for arc with ilabel = " << arc.ilabel << ", olabel = " << arc.olabel << ", nextstate = " << arc.nextstate; error_ = true; } if (arc.ilabel == 0 && l != 0 && arc.nextstate == kNoStateId) { return ToArc(superfinal_label_, l, weight, arc.nextstate); } else { return ToArc(arc.ilabel, l, weight, arc.nextstate); } } constexpr MapFinalAction FinalAction() const { return MAP_ALLOW_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64 Properties(uint64 inprops) const { uint64 outprops = inprops & kOLabelInvariantProperties & kWeightInvariantProperties & kAddSuperFinalProperties; if (error_) outprops |= kError; return outprops; } private: template <GallicType GT> static bool Extract(const GallicWeight<Label, AW, GT> &gallic_weight, typename A::Weight *weight, typename A::Label *label) { using GW = StringWeight<Label, GallicStringType(GT)>; const GW &w1 = gallic_weight.Value1(); const AW &w2 = gallic_weight.Value2(); typename GW::Iterator iter1(w1); const Label l = w1.Size() == 1 ? 
iter1.Value() : 0; if (l == kStringInfinity || l == kStringBad || w1.Size() > 1) return false; *label = l; *weight = w2; return true; } static bool Extract(const GallicWeight<Label, AW, GALLIC> &gallic_weight, typename A::Weight *weight, typename A::Label *label) { if (gallic_weight.Size() > 1) return false; if (gallic_weight.Size() == 0) { *label = 0; *weight = A::Weight::Zero(); return true; } return Extract<GALLIC_RESTRICT>(gallic_weight.Back(), weight, label); } const Label superfinal_label_; mutable bool error_; }; // Mapper from GallicArc<A> to A. template <class A, GallicType G = GALLIC_LEFT> class GallicToNewSymbolsMapper { public: using FromArc = GallicArc<A, G>; using ToArc = A; using Label = typename ToArc::Label; using StateId = typename ToArc::StateId; using AW = typename ToArc::Weight; using GW = typename FromArc::Weight; using SW = StringWeight<Label, GallicStringType(G)>; explicit GallicToNewSymbolsMapper(MutableFst<ToArc> *fst) : fst_(fst), lmax_(0), osymbols_(fst->OutputSymbols()), isymbols_(nullptr), error_(false) { fst_->DeleteStates(); state_ = fst_->AddState(); fst_->SetStart(state_); fst_->SetFinal(state_, AW::One()); if (osymbols_) { string name = osymbols_->Name() + "_from_gallic"; fst_->SetInputSymbols(new SymbolTable(name)); isymbols_ = fst_->MutableInputSymbols(); const int64 zero = 0; isymbols_->AddSymbol(osymbols_->Find(zero), 0); } else { fst_->SetInputSymbols(nullptr); } } ToArc operator()(const FromArc &arc) { // Super-non-final arc. if (arc.nextstate == kNoStateId && arc.weight == GW::Zero()) { return ToArc(arc.ilabel, 0, AW::Zero(), kNoStateId); } SW w1 = arc.weight.Value1(); AW w2 = arc.weight.Value2(); Label l; if (w1.Size() == 0) { l = 0; } else { auto insert_result = map_.insert(std::make_pair(w1, kNoLabel)); if (!insert_result.second) { l = insert_result.first->second; } else { l = ++lmax_; insert_result.first->second = l; StringWeightIterator<SW> iter1(w1); StateId n; string s; for (size_t i = 0, p = state_; i < w1.Size(); ++i, iter1.Next(), p = n) { n = i == w1.Size() - 1 ? state_ : fst_->AddState(); fst_->AddArc(p, ToArc(i ? 0 : l, iter1.Value(), AW::One(), n)); if (isymbols_) { if (i) s = s + "_"; s = s + osymbols_->Find(iter1.Value()); } } if (isymbols_) isymbols_->AddSymbol(s, l); } } if (l == kStringInfinity || l == kStringBad || arc.ilabel != arc.olabel) { FSTERROR() << "GallicToNewSymbolMapper: Unrepresentable weight: " << l; error_ = true; } return ToArc(arc.ilabel, l, w2, arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_ALLOW_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_CLEAR_SYMBOLS; } uint64 Properties(uint64 inprops) const { uint64 outprops = inprops & kOLabelInvariantProperties & kWeightInvariantProperties & kAddSuperFinalProperties; if (error_) outprops |= kError; return outprops; } private: class StringKey { public: size_t operator()(const SW &x) const { return x.Hash(); } }; using Map = std::unordered_map<SW, Label, StringKey>; MutableFst<ToArc> *fst_; Map map_; Label lmax_; StateId state_; const SymbolTable *osymbols_; SymbolTable *isymbols_; mutable bool error_; }; // TODO(kbg): Add common base class for those mappers which do nothing except // mutate their weights. // Mapper to add a constant to all weights. 
template <class A> class PlusMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; explicit PlusMapper(Weight weight) : weight_(std::move(weight)) {} ToArc operator()(const FromArc &arc) const { if (arc.weight == Weight::Zero()) return arc; return ToArc(arc.ilabel, arc.olabel, Plus(arc.weight, weight_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props & kWeightInvariantProperties; } private: const Weight weight_; }; // Mapper to (right) multiply a constant to all weights. template <class A> class TimesMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; explicit TimesMapper(Weight weight) : weight_(std::move(weight)) {} ToArc operator()(const FromArc &arc) const { if (arc.weight == Weight::Zero()) return arc; return ToArc(arc.ilabel, arc.olabel, Times(arc.weight, weight_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props & kWeightInvariantProperties; } private: const Weight weight_; }; // Mapper to take all weights to a constant power. The power argument is stored // as a double, so if there is a floating-point power implementation for this // weight type, it will take precedence. Otherwise, the power argument's 53 bits // of integer precision will be implicitly converted to a size_t and the default // power implementation (iterated multiplication) will be used instead. template <class A> class PowerMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; explicit PowerMapper(double power) : power_(power) {} ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, Power(arc.weight, power_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props & kWeightInvariantProperties; } private: const double power_; }; // Mapper to reciprocate all non-Zero() weights. template <class A> class InvertWeightMapper { public: using FromArc = A; using ToArc = A; using Weight = typename FromArc::Weight; ToArc operator()(const FromArc &arc) const { if (arc.weight == Weight::Zero()) return arc; return ToArc(arc.ilabel, arc.olabel, Divide(Weight::One(), arc.weight), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props & kWeightInvariantProperties; } }; // Mapper to map all non-Zero() weights to One(). 
template <class A, class B = A> class RmWeightMapper { public: using FromArc = A; using ToArc = B; using FromWeight = typename FromArc::Weight; using ToWeight = typename ToArc::Weight; ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, arc.weight != FromWeight::Zero() ? ToWeight::One() : ToWeight::Zero(), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return (props & kWeightInvariantProperties) | kUnweighted; } }; // Mapper to quantize all weights. template <class A, class B = A> class QuantizeMapper { public: using FromArc = A; using ToArc = B; using FromWeight = typename FromArc::Weight; using ToWeight = typename ToArc::Weight; QuantizeMapper() : delta_(kDelta) {} explicit QuantizeMapper(float d) : delta_(d) {} ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, arc.weight.Quantize(delta_), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props & kWeightInvariantProperties; } private: const float delta_; }; // Mapper from A to B under the assumption: // // B::Weight = A::Weight::ReverseWeight // B::Label == A::Label // B::StateId == A::StateId // // The weight is reversed, while the label and nextstate are preserved. template <class A, class B> class ReverseWeightMapper { public: using FromArc = A; using ToArc = B; ToArc operator()(const FromArc &arc) const { return ToArc(arc.ilabel, arc.olabel, arc.weight.Reverse(), arc.nextstate); } constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } constexpr MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } constexpr MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } uint64 Properties(uint64 props) const { return props; } }; } // namespace fst #endif // FST_ARC_MAP_H_
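The mappers defined in this header are normally applied through the delayed ArcMapFst shown above. A minimal usage sketch, assuming a standard OpenFst build; "input.fst" and "mapped.fst" are hypothetical file names:

#include <memory>
#include <fst/fstlib.h>

int main() {
  std::unique_ptr<fst::StdFst> ifst(fst::StdFst::Read("input.fst"));
  if (!ifst) return 1;
  // Delayed FST whose arcs are computed on demand; TimesMapper right-multiplies
  // (semiring Times) every weight by 1.5, which for StdArc/tropical weights
  // means adding 1.5 to each weight value.
  const fst::TimesMapper<fst::StdArc> mapper(fst::TropicalWeight(1.5));
  fst::ArcMapFst<fst::StdArc, fst::StdArc, fst::TimesMapper<fst::StdArc>>
      mapped(*ifst, mapper);
  fst::StdVectorFst result(mapped);  // Forces expansion into a mutable FST.
  result.Write("mapped.fst");
  return 0;
}

Because ArcMapFst is cache-backed and delayed, the mapping cost is only paid for the states that are actually visited.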
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/lib/weight.cc
#include <fst/weight.h> DEFINE_string(fst_weight_separator, ",", "Character separator between printed composite weights; " "must be a single character"); DEFINE_string(fst_weight_parentheses, "", "Characters enclosing the first weight of a printed composite " "weight (e.g., pair weight, tuple weight and derived classes) to " "ensure proper I/O of nested composite weights; " "must have size 0 (none) or 2 (open and close parenthesis)"); namespace fst { namespace internal { CompositeWeightIO::CompositeWeightIO(char separator, std::pair<char, char> parentheses) : separator_(separator), open_paren_(parentheses.first), close_paren_(parentheses.second), error_(false) { if ((open_paren_ == 0 || close_paren_ == 0) && open_paren_ != close_paren_) { FSTERROR() << "Invalid configuration of weight parentheses: " << static_cast<int>(open_paren_) << " " << static_cast<int>(close_paren_); error_ = true; } } CompositeWeightIO::CompositeWeightIO() : CompositeWeightIO(FLAGS_fst_weight_separator.empty() ? 0 : FLAGS_fst_weight_separator.front(), {FLAGS_fst_weight_parentheses.empty() ? 0 : FLAGS_fst_weight_parentheses[0], FLAGS_fst_weight_parentheses.size() < 2 ? 0 : FLAGS_fst_weight_parentheses[1]}) { if (FLAGS_fst_weight_separator.size() != 1) { FSTERROR() << "CompositeWeight: " << "FLAGS_fst_weight_separator.size() is not equal to 1"; error_ = true; } if (!FLAGS_fst_weight_parentheses.empty() && FLAGS_fst_weight_parentheses.size() != 2) { FSTERROR() << "CompositeWeight: " << "FLAGS_fst_weight_parentheses.size() is not equal to 2"; error_ = true; } } } // namespace internal CompositeWeightWriter::CompositeWeightWriter(std::ostream &ostrm) : ostrm_(ostrm) { if (error()) ostrm.clear(std::ios::badbit); } CompositeWeightWriter::CompositeWeightWriter(std::ostream &ostrm, char separator, std::pair<char, char> parentheses) : internal::CompositeWeightIO(separator, parentheses), ostrm_(ostrm) { if (error()) ostrm_.clear(std::ios::badbit); } void CompositeWeightWriter::WriteBegin() { if (open_paren_ != 0) { ostrm_ << open_paren_; } } void CompositeWeightWriter::WriteEnd() { if (close_paren_ != 0) { ostrm_ << close_paren_; } } CompositeWeightReader::CompositeWeightReader(std::istream &istrm) : istrm_(istrm) { if (error()) istrm_.clear(std::ios::badbit); } CompositeWeightReader::CompositeWeightReader(std::istream &istrm, char separator, std::pair<char, char> parentheses) : internal::CompositeWeightIO(separator, parentheses), istrm_(istrm) { if (error()) istrm_.clear(std::ios::badbit); } void CompositeWeightReader::ReadBegin() { do { // Skips whitespace. c_ = istrm_.get(); } while (std::isspace(c_)); if (open_paren_ != 0) { if (c_ != open_paren_) { FSTERROR() << "CompositeWeightReader: Open paren missing: " << "fst_weight_parentheses flag set correcty?"; istrm_.clear(std::ios::badbit); return; } ++depth_; c_ = istrm_.get(); } } void CompositeWeightReader::ReadEnd() { if (c_ != EOF && !std::isspace(c_)) { FSTERROR() << "CompositeWeightReader: excess character: '" << static_cast<char>(c_) << "': fst_weight_parentheses flag set correcty?"; istrm_.clear(std::ios::badbit); } } } // namespace fst
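The two flags defined above control how composite weights (pair, product, and tuple weights and their derived classes) are printed and parsed. A minimal sketch of setting them programmatically, assuming an OpenFst build; the printed form shown in the comment is indicative only:

#include <iostream>
#include <fst/float-weight.h>
#include <fst/product-weight.h>

int main() {
  FLAGS_fst_weight_separator = ",";
  FLAGS_fst_weight_parentheses = "()";  // Must have size 0 or 2, per the check above.
  const fst::ProductWeight<fst::TropicalWeight, fst::TropicalWeight> w(1.5, 2.5);
  std::cout << w << std::endl;  // Expected to print something like (1.5,2.5)
  return 0;
}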
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/providers/nnapi/nnapi_provider_factory.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once

#include "onnxruntime_c_api.h"

// NNAPIFlags are the boolean options that can be set for the NNAPI EP.
// This enum is defined as bit flags and cannot have negative values.
// To generate a uint32_t nnapi_flags for use with
// OrtSessionOptionsAppendExecutionProvider_Nnapi below:
//   uint32_t nnapi_flags = 0;
//   nnapi_flags |= NNAPI_FLAG_USE_FP16;
enum NNAPIFlags {
  NNAPI_FLAG_USE_NONE = 0x000,

  // Use fp16 relaxation in the NNAPI EP; this may improve performance but may also reduce precision.
  NNAPI_FLAG_USE_FP16 = 0x001,

  // Use NCHW layout in the NNAPI EP; this is only available from Android API level 29 onward.
  // Please note that, for now, NNAPI performs worse with NCHW than with NHWC.
  NNAPI_FLAG_USE_NCHW = 0x002,

  // Prevent NNAPI from using CPU devices.
  //
  // NNAPI is more efficient using GPU or NPU for execution, and NNAPI might fall back to its own CPU implementation
  // for operations not supported by the GPU/NPU. The CPU implementation of NNAPI (which is called nnapi-reference)
  // might be less efficient than ORT's optimized versions of the operation. It might be advantageous to disable
  // the NNAPI CPU fallback and handle execution using ORT kernels.
  //
  // For some models, if NNAPI would use CPU to execute an operation, and this flag is set, the execution of the
  // model may fall back to ORT kernels.
  //
  // This option is only available from Android API level 29 onward, and will be ignored on Android API level 28 and below.
  //
  // For NNAPI device assignments, see https://developer.android.com/ndk/guides/neuralnetworks#device-assignment
  // For NNAPI CPU fallback, see https://developer.android.com/ndk/guides/neuralnetworks#cpu-fallback
  NNAPI_FLAG_CPU_DISABLED = 0x004,

  // Keep NNAPI_FLAG_LAST at the end of the enum definition
  // and assign the last NNAPIFlag to it.
  NNAPI_FLAG_LAST = NNAPI_FLAG_CPU_DISABLED,
};

#ifdef __cplusplus
extern "C" {
#endif

ORT_EXPORT ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_Nnapi,
                          _In_ OrtSessionOptions* options, uint32_t nnapi_flags);

#ifdef __cplusplus
}
#endif
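A minimal sketch of how the declaration above is typically used, assuming an Android build of ONNX Runtime that includes the NNAPI execution provider:

#include "onnxruntime_c_api.h"
#include "nnapi_provider_factory.h"

// Appends the NNAPI EP to existing session options, combining two of the flags above.
OrtStatus* AppendNnapiProvider(OrtSessionOptions* session_options) {
  uint32_t nnapi_flags = 0;
  nnapi_flags |= NNAPI_FLAG_USE_FP16;      // Allow fp16 relaxation.
  nnapi_flags |= NNAPI_FLAG_CPU_DISABLED;  // Keep execution off nnapi-reference.
  return OrtSessionOptionsAppendExecutionProvider_Nnapi(session_options, nnapi_flags);
}

Per the usual ORT C API convention, a null return value indicates success; a non-null OrtStatus* carries the error and must be released through the ORT API.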
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/pdt/pdtexpand.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Expands a (bounded-stack) PDT as an FST. #include <cstring> #include <memory> #include <string> #include <vector> #include <fst/flags.h> #include <fst/log.h> #include <fst/extensions/pdt/pdtscript.h> #include <fst/util.h> DEFINE_string(pdt_parentheses, "", "PDT parenthesis label pairs"); DEFINE_bool(connect, true, "Trim output?"); DEFINE_bool(keep_parentheses, false, "Keep PDT parentheses in result?"); DEFINE_string(weight, "", "Weight threshold"); int main(int argc, char **argv) { namespace s = fst::script; using fst::script::FstClass; using fst::script::VectorFstClass; using fst::script::WeightClass; using fst::ReadLabelPairs; string usage = "Expand a (bounded-stack) PDT as an FST.\n\n Usage: "; usage += argv[0]; usage += " in.pdt [out.fst]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 3) { ShowUsage(); return 1; } const string in_name = (argc > 1 && (strcmp(argv[1], "-") != 0)) ? argv[1] : ""; const string out_name = argc > 2 ? argv[2] : ""; std::unique_ptr<FstClass> ifst(FstClass::Read(in_name)); if (!ifst) return 1; if (FLAGS_pdt_parentheses.empty()) { LOG(ERROR) << argv[0] << ": No PDT parenthesis label pairs provided"; return 1; } std::vector<s::LabelPair> parens; if (!ReadLabelPairs(FLAGS_pdt_parentheses, &parens, false)) return 1; const auto weight_threshold = FLAGS_weight.empty() ? WeightClass::Zero(ifst->WeightType()) : WeightClass(ifst->WeightType(), FLAGS_weight); VectorFstClass ofst(ifst->ArcType()); s::PdtExpand(*ifst, parens, &ofst, s::PdtExpandOptions(FLAGS_connect, FLAGS_keep_parentheses, weight_threshold)); ofst.Write(out_name); return 0; }
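For reference, a typical invocation of the tool above (the installed binary is typically named pdtexpand; file names here are hypothetical) is: pdtexpand --pdt_parentheses=parens.txt in.pdt out.fst, where parens.txt lists the open/close parenthesis label pairs read by ReadLabelPairs(); --connect, --keep_parentheses, and --weight behave as described by the flag definitions at the top of the file.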
0
coqui_public_repos/STT-examples/net_framework
coqui_public_repos/STT-examples/net_framework/STTWPF/App.config
<?xml version="1.0" encoding="utf-8" ?> <configuration> <startup> <supportedRuntime version="v4.0" sku=".NETFramework,Version=v4.6.2" /> </startup> </configuration>
0
coqui_public_repos/inference-engine/third_party/kenlm/lm/common
coqui_public_repos/inference-engine/third_party/kenlm/lm/common/test_data/toy0.arpa
\data\ ngram 1=5 ngram 2=7 ngram 3=7 \1-grams: -0.90309 <unk> 0 0 <s> -0.30103 -0.46943438 a -0.30103 -0.5720968 </s> 0 -0.5720968 b -0.30103 \2-grams: -0.37712017 <s> a -0.30103 -0.37712017 a a -0.30103 -0.2984526 b a -0.30103 -0.58682007 a </s> 0 -0.52201796 b </s> 0 -0.41574955 <s> b -0.30103 -0.58682007 a b -0.30103 \3-grams: -0.14885087 <s> a a -0.33741078 b a a -0.124077894 <s> b a -0.2997394 a b a -0.42082912 b a </s> -0.397617 a b </s> -0.20102891 a a b \end\
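For reference, the numeric fields in the ARPA body above are base-10 logarithms: a bigram line such as "-0.37712017 <s> a -0.30103" encodes p(a | <s>) = 10^-0.37712 ≈ 0.42, with a backoff weight of 10^-0.30103 = 0.5 that is applied when a longer matching context is not found.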
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_14x_8k_multiarchpkg-linux-amd64-opt.yml
build: template_file: test-linux-opt-base.tyml docker_image: "ubuntu:16.04" dependencies: - "node-package-cpu" - "test-training_8k-linux-amd64-py36m-opt" test_model_task: "test-training_8k-linux-amd64-py36m-opt" system_setup: > ${nodejs.packages_xenial.prep_14} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests.sh 14.x 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 CPU NodeJS MultiArch Package 14.x tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on NodeJS MultiArch Package v14.x, CPU only, optimized version (8kHz)"
0
coqui_public_repos/STT
coqui_public_repos/STT/bin/compare_samples.py
#!/usr/bin/env python """ Tool for comparing two wav samples """ import argparse import sys import numpy as np from coqui_stt_training.util.audio import AUDIO_TYPE_NP, mean_dbfs from coqui_stt_training.util.sample_collections import load_sample def fail(message): print(message, file=sys.stderr, flush=True) sys.exit(1) def compare_samples(): sample1 = load_sample(CLI_ARGS.sample1).unpack() sample2 = load_sample(CLI_ARGS.sample2).unpack() if sample1.audio_format != sample2.audio_format: fail( "Samples differ on: audio-format ({} and {})".format( sample1.audio_format, sample2.audio_format ) ) if abs(sample1.duration - sample2.duration) > 0.001: fail( "Samples differ on: duration ({} and {})".format( sample1.duration, sample2.duration ) ) sample1.change_audio_type(AUDIO_TYPE_NP) sample2.change_audio_type(AUDIO_TYPE_NP) samples = [sample1, sample2] largest = np.argmax([sample1.audio.shape[0], sample2.audio.shape[0]]) smallest = (largest + 1) % 2 samples[largest].audio = samples[largest].audio[: len(samples[smallest].audio)] audio_diff = samples[largest].audio - samples[smallest].audio diff_dbfs = mean_dbfs(audio_diff) differ_msg = "Samples differ on: sample data ({:0.2f} dB difference) ".format( diff_dbfs ) equal_msg = "Samples are considered equal ({:0.2f} dB difference)".format(diff_dbfs) if CLI_ARGS.if_differ: if diff_dbfs <= CLI_ARGS.threshold: fail(equal_msg) if not CLI_ARGS.no_success_output: print(differ_msg, file=sys.stderr, flush=True) else: if diff_dbfs > CLI_ARGS.threshold: fail(differ_msg) if not CLI_ARGS.no_success_output: print(equal_msg, file=sys.stderr, flush=True) def handle_args(): parser = argparse.ArgumentParser( description="Tool for checking similarity of two samples" ) parser.add_argument("sample1", help="Filename of sample 1 to compare") parser.add_argument("sample2", help="Filename of sample 2 to compare") parser.add_argument( "--threshold", type=float, default=-60.0, help="dB of sample deltas above which they are considered different", ) parser.add_argument( "--if-differ", action="store_true", help="If to succeed and return status code 0 on different signals and fail on equal ones (inverse check)." "This will still fail on different formats or durations.", ) parser.add_argument( "--no-success-output", action="store_true", help="Stay silent on success (if samples are equal of - with --if-differ - samples are not equal)", ) return parser.parse_args() if __name__ == "__main__": CLI_ARGS = handle_args() compare_samples()
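A typical invocation of the tool above (file names are hypothetical): python bin/compare_samples.py one.wav two.wav --threshold -60. With the defaults it exits 0 when the mean dBFS of the sample-wise difference is at or below the threshold and exits 1 otherwise; --if-differ inverts that check, and mismatched audio formats or durations always fail.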
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/mapped-file.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_MAPPED_FILE_H_
#define FST_MAPPED_FILE_H_

#include <cstddef>
#include <istream>
#include <string>

#include <fst/compat.h>
#include <fst/flags.h>

namespace fst {

// A memory region is a simple abstraction for allocated memory or data from
// memory-mapped files. If mmap is null, then data represents an owned region
// of size bytes. Otherwise, mmap and size refer to the mapping and data is a
// casted pointer to a region contained within [mmap, mmap + size). If size is
// 0, then mmap and data refer to a block of memory managed externally by some
// other allocator. The offset is used when allocating memory to provide
// padding for alignment.
struct MemoryRegion {
  void *data;
  void *mmap;
  size_t size;
  int offset;
};

class MappedFile {
 public:
  ~MappedFile();

  void *mutable_data() const { return region_.data; }

  const void *data() const { return region_.data; }

  // Returns a MappedFile object that contains the contents of the input stream
  // istrm starting from the current file position with size bytes. The
  // memorymap bool is advisory, and Map will default to allocating and
  // reading. The source argument needs to contain the filename that was used
  // to open the input stream.
  static MappedFile *Map(std::istream *istrm, bool memorymap,
                         const string &source, size_t size);

  // Creates a MappedFile object with a new[]'ed block of memory of size. The
  // align argument can be used to specify a desired block alignment.
  // This is RECOMMENDED FOR INTERNAL USE ONLY as it may change in future
  // releases.
  static MappedFile *Allocate(size_t size, int align = kArchAlignment);

  // Creates a MappedFile object pointing to a borrowed reference to data. This
  // block of memory is not owned by the MappedFile object and will not be
  // freed. This is RECOMMENDED FOR INTERNAL USE ONLY and may change in future
  // releases.
  static MappedFile *Borrow(void *data);

  // Alignment required for mapping structures in bytes. Regions of memory that
  // are not aligned upon a 128-bit boundary are read from the file instead.
  // This is consistent with the alignment boundary set in ConstFst and
  // CompactFst.
  static constexpr int kArchAlignment = 16;

  static constexpr size_t kMaxReadChunk = 256 * 1024 * 1024;  // 256 MB.

 private:
  explicit MappedFile(const MemoryRegion &region);

  MemoryRegion region_;

  MappedFile(const MappedFile &) = delete;
  MappedFile &operator=(const MappedFile &) = delete;
};

}  // namespace fst

#endif  // FST_MAPPED_FILE_H_
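A minimal sketch of the allocation path documented above; the API is flagged as recommended for internal use only, so treat this as illustrative:

#include <cstring>
#include <memory>
#include <fst/mapped-file.h>

void ZeroScratchBuffer() {
  const size_t size = 1024;
  // Allocate() returns a new[]-backed region, aligned per kArchAlignment by default.
  std::unique_ptr<fst::MappedFile> block(fst::MappedFile::Allocate(size));
  std::memset(block->mutable_data(), 0, size);
}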
0
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/wasm/package.json.in
{ "name" : "$(PROJECT_NAME)", "version" : "$(PROJECT_VERSION)", "description": "A Webassembly build for doing speech recognition using a Coqui STT model", "main": "./dist/stt_wasm.js", "files": [ "dist" ], "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "author": "Coqui.ai", "license": "LGPL-2.1-only" }
0
coqui_public_repos/TTS/TTS/demos/xtts_ft_demo
coqui_public_repos/TTS/TTS/demos/xtts_ft_demo/utils/formatter.py
import os import gc import torchaudio import pandas from faster_whisper import WhisperModel from glob import glob from tqdm import tqdm import torch import torchaudio # torch.set_num_threads(1) from TTS.tts.layers.xtts.tokenizer import multilingual_cleaners torch.set_num_threads(16) import os audio_types = (".wav", ".mp3", ".flac") def list_audios(basePath, contains=None): # return the set of files that are valid return list_files(basePath, validExts=audio_types, contains=contains) def list_files(basePath, validExts=None, contains=None): # loop over the directory structure for (rootDir, dirNames, filenames) in os.walk(basePath): # loop over the filenames in the current directory for filename in filenames: # if the contains string is not none and the filename does not contain # the supplied string, then ignore the file if contains is not None and filename.find(contains) == -1: continue # determine the file extension of the current file ext = filename[filename.rfind("."):].lower() # check to see if the file is an audio and should be processed if validExts is None or ext.endswith(validExts): # construct the path to the audio and yield it audioPath = os.path.join(rootDir, filename) yield audioPath def format_audio_list(audio_files, target_language="en", out_path=None, buffer=0.2, eval_percentage=0.15, speaker_name="coqui", gradio_progress=None): audio_total_size = 0 # make sure that ooutput file exists os.makedirs(out_path, exist_ok=True) # Loading Whisper device = "cuda" if torch.cuda.is_available() else "cpu" print("Loading Whisper Model!") asr_model = WhisperModel("large-v2", device=device, compute_type="float16") metadata = {"audio_file": [], "text": [], "speaker_name": []} if gradio_progress is not None: tqdm_object = gradio_progress.tqdm(audio_files, desc="Formatting...") else: tqdm_object = tqdm(audio_files) for audio_path in tqdm_object: wav, sr = torchaudio.load(audio_path) # stereo to mono if needed if wav.size(0) != 1: wav = torch.mean(wav, dim=0, keepdim=True) wav = wav.squeeze() audio_total_size += (wav.size(-1) / sr) segments, _ = asr_model.transcribe(audio_path, word_timestamps=True, language=target_language) segments = list(segments) i = 0 sentence = "" sentence_start = None first_word = True # added all segments words in a unique list words_list = [] for _, segment in enumerate(segments): words = list(segment.words) words_list.extend(words) # process each word for word_idx, word in enumerate(words_list): if first_word: sentence_start = word.start # If it is the first sentence, add buffer or get the begining of the file if word_idx == 0: sentence_start = max(sentence_start - buffer, 0) # Add buffer to the sentence start else: # get previous sentence end previous_word_end = words_list[word_idx - 1].end # add buffer or get the silence midle between the previous sentence and the current one sentence_start = max(sentence_start - buffer, (previous_word_end + sentence_start)/2) sentence = word.word first_word = False else: sentence += word.word if word.word[-1] in ["!", ".", "?"]: sentence = sentence[1:] # Expand number and abbreviations plus normalization sentence = multilingual_cleaners(sentence, target_language) audio_file_name, _ = os.path.splitext(os.path.basename(audio_path)) audio_file = f"wavs/{audio_file_name}_{str(i).zfill(8)}.wav" # Check for the next word's existence if word_idx + 1 < len(words_list): next_word_start = words_list[word_idx + 1].start else: # If don't have more words it means that it is the last sentence then use the audio len as next word start 
                    next_word_start = (wav.shape[0] - 1) / sr

                # Average the current word end and next word start
                word_end = min((word.end + next_word_start) / 2, word.end + buffer)

                absolute_path = os.path.join(out_path, audio_file)
                os.makedirs(os.path.dirname(absolute_path), exist_ok=True)
                i += 1
                first_word = True

                audio = wav[int(sr*sentence_start):int(sr*word_end)].unsqueeze(0)
                # if the audio is too short, ignore it (i.e. < 0.33 seconds)
                if audio.size(-1) >= sr/3:
                    torchaudio.save(absolute_path, audio, sr)
                else:
                    continue

                metadata["audio_file"].append(audio_file)
                metadata["text"].append(sentence)
                metadata["speaker_name"].append(speaker_name)

    df = pandas.DataFrame(metadata)
    df = df.sample(frac=1)
    num_val_samples = int(len(df)*eval_percentage)
    df_eval = df[:num_val_samples]
    df_train = df[num_val_samples:]

    df_train = df_train.sort_values('audio_file')
    train_metadata_path = os.path.join(out_path, "metadata_train.csv")
    df_train.to_csv(train_metadata_path, sep="|", index=False)

    eval_metadata_path = os.path.join(out_path, "metadata_eval.csv")
    df_eval = df_eval.sort_values('audio_file')
    df_eval.to_csv(eval_metadata_path, sep="|", index=False)

    # deallocate VRAM and RAM
    del asr_model, df_train, df_eval, df, metadata
    gc.collect()

    return train_metadata_path, eval_metadata_path, audio_total_size
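As an example of the end-to-end flow (paths are hypothetical), format_audio_list(list_audios("/data/clips"), target_language="en", out_path="/data/xtts_ft") transcribes the clips with Whisper, writes one wav per detected sentence under /data/xtts_ft/wavs, and returns the paths of metadata_train.csv and metadata_eval.csv together with the total duration of the input audio in seconds.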
0
coqui_public_repos/STT
coqui_public_repos/STT/bin/run-ci-ldc93s1-flac.sh
#!/bin/sh set -xe if [ ! -f train.py ]; then echo "Please make sure you run this from STT's top level directory." exit 1 fi; if [ ! -f "data/smoke_test/ldc93s1.csv" ]; then echo "Downloading and preprocessing LDC93S1 example data, saving in ./data/smoke_test." python -u bin/import_ldc93s1.py ./data/smoke_test fi; checkpoint_dir="$HOME/.local/share/stt/ldc93s1" # Force only one visible device because we have a single-sample dataset # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 python -m coqui_stt_training.train \ --alphabet_config_path "data/alphabet.txt" \ --show_progressbar false \ --train_files data/smoke_test/ldc93s1_flac.csv \ --test_files data/smoke_test/ldc93s1_flac.csv \ --train_batch_size 1 \ --test_batch_size 1 \ --n_hidden 100 \ --epochs 200 \ --checkpoint_dir "$checkpoint_dir" \ "$@"
0
coqui_public_repos/STT/native_client/kenlm/lm
coqui_public_repos/STT/native_client/kenlm/lm/interpolate/merge_probabilities.hh
#ifndef LM_INTERPOLATE_MERGE_PROBABILITIES_H #define LM_INTERPOLATE_MERGE_PROBABILITIES_H #include "../common/ngram.hh" #include "bounded_sequence_encoding.hh" #include "../../util/fixed_array.hh" #include "../../util/stream/multi_stream.hh" #include <stdint.h> namespace lm { namespace interpolate { struct InterpolateInfo; /** * Make the encoding of backoff values for a given order. This stores values * in [PartialProbGamma::FromBegin(), PartialProbGamma::FromEnd()) */ BoundedSequenceEncoding MakeEncoder(const InterpolateInfo &info, uint8_t order); /** * The first pass for the offline log-linear interpolation algorithm. This * reads K **suffix-ordered** streams for each model, for each order, of * ngram records (ngram-id, prob, backoff). It further assumes that the * ngram-ids have been unified over all of the stream inputs. * * Its output is records of (ngram-id, prob-prod, backoff-level, * backoff-level, ...) where the backoff-levels (of which there are K) are * the context length (0 for unigrams) that the corresponding model had to * back off to in order to obtain a probability for that ngram-id. Each of * these streams is terminated with a record whose ngram-id is all * maximum-integers for simplicity in implementation here. * * @param model_by_order An array of length N (max_i N_i) containing at * the ChainPositions for the streams for order (i + 1). * The Rus attached to output chains for each order (of length K) */ class MergeProbabilities { public: MergeProbabilities(const InterpolateInfo &info, util::FixedArray<util::stream::ChainPositions> &models_by_order) : info_(info), models_by_order_(models_by_order) {} void Run(const util::stream::ChainPositions &outputs); private: const InterpolateInfo &info_; util::FixedArray<util::stream::ChainPositions> &models_by_order_; }; /** * This class represents the output payload for this pass, which consists * of an ngram-id, a probability, and then a vector of orders from which * each of the component models backed off to for this ngram, encoded * using the BoundedSequenceEncoding class. */ class PartialProbGamma : public lm::NGramHeader { public: PartialProbGamma(std::size_t order, std::size_t backoff_bytes) : lm::NGramHeader(NULL, order), backoff_bytes_(backoff_bytes) { // nothing } std::size_t TotalSize() const { return sizeof(WordIndex) * Order() + sizeof(After) + backoff_bytes_; } // TODO: cache bounded sequence encoding in the pipeline? static std::size_t TotalSize(const InterpolateInfo &info, uint8_t order) { return sizeof(WordIndex) * order + sizeof(After) + MakeEncoder(info, order).EncodedLength(); } float &Prob() { return Pay().prob; } float Prob() const { return Pay().prob; } float &LowerProb() { return Pay().lower_prob; } float LowerProb() const { return Pay().lower_prob; } const uint8_t *FromBegin() const { return Pay().from; } uint8_t *FromBegin() { return Pay().from; } private: struct After { // Note that backoff_and_normalize assumes this comes first. float prob; float lower_prob; uint8_t from[]; }; const After &Pay() const { return *reinterpret_cast<const After *>(end()); } After &Pay() { return *reinterpret_cast<After*>(end()); } std::size_t backoff_bytes_; }; }} // namespaces #endif // LM_INTERPOLATE_MERGE_PROBABILITIES_H
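For context, the "offline log-linear interpolation" mentioned above combines the K component models multiplicatively rather than additively; with component probabilities p_k(w | h) and weights \lambda_k, the interpolated model is generally of the form

  p(w \mid h) = \frac{\prod_k p_k(w \mid h)^{\lambda_k}}{\sum_{w'} \prod_k p_k(w' \mid h)^{\lambda_k}},

which is why this first pass carries, per n-gram, a product over component probabilities (the prob-prod field) plus the backoff level each component needed.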
0
coqui_public_repos/STT/native_client/kenlm/lm
coqui_public_repos/STT/native_client/kenlm/lm/common/print.hh
#ifndef LM_COMMON_PRINT_H #define LM_COMMON_PRINT_H #include "../word_index.hh" #include "../../util/mmap.hh" #include "../../util/string_piece.hh" #include <cassert> #include <vector> namespace util { namespace stream { class ChainPositions; }} // Warning: PrintARPA routines read all unigrams before all bigrams before all // trigrams etc. So if other parts of the chain move jointly, you'll have to // buffer. namespace lm { class VocabReconstitute { public: // fd must be alive for life of this object; does not take ownership. explicit VocabReconstitute(int fd); const char *Lookup(WordIndex index) const { assert(index < map_.size() - 1); return map_[index]; } StringPiece LookupPiece(WordIndex index) const { return StringPiece(map_[index], map_[index + 1] - 1 - map_[index]); } std::size_t Size() const { // There's an extra entry to support StringPiece lengths. return map_.size() - 1; } private: util::scoped_memory memory_; std::vector<const char*> map_; }; class PrintARPA { public: // Does not take ownership of vocab_fd or out_fd. explicit PrintARPA(int vocab_fd, int out_fd, const std::vector<uint64_t> &counts) : vocab_fd_(vocab_fd), out_fd_(out_fd), counts_(counts) {} void Run(const util::stream::ChainPositions &positions); private: int vocab_fd_; int out_fd_; std::vector<uint64_t> counts_; }; } // namespace lm #endif // LM_COMMON_PRINT_H
0
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/tortoise/diffusion_decoder.py
import math import random from abc import abstractmethod import torch import torch.nn as nn import torch.nn.functional as F from torch import autocast from TTS.tts.layers.tortoise.arch_utils import AttentionBlock, normalization def is_latent(t): return t.dtype == torch.float def is_sequence(t): return t.dtype == torch.long def timestep_embedding(timesteps, dim, max_period=10000): """ Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings. """ half = dim // 2 freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to( device=timesteps.device ) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding class TimestepBlock(nn.Module): @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): def forward(self, x, emb): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) else: x = layer(x) return x class ResBlock(TimestepBlock): def __init__( self, channels, emb_channels, dropout, out_channels=None, dims=2, kernel_size=3, efficient_config=True, use_scale_shift_norm=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_scale_shift_norm = use_scale_shift_norm padding = {1: 0, 3: 1, 5: 2}[kernel_size] eff_kernel = 1 if efficient_config else 3 eff_padding = 0 if efficient_config else 1 self.in_layers = nn.Sequential( normalization(channels), nn.SiLU(), nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding), ) self.emb_layers = nn.Sequential( nn.SiLU(), nn.Linear( emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, ), ) self.out_layers = nn.Sequential( normalization(self.out_channels), nn.SiLU(), nn.Dropout(p=dropout), nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding), ) if self.out_channels == channels: self.skip_connection = nn.Identity() else: self.skip_connection = nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding) def forward(self, x, emb): h = self.in_layers(x) emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] scale, shift = torch.chunk(emb_out, 2, dim=1) h = out_norm(h) * (1 + scale) + shift h = out_rest(h) else: h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h class DiffusionLayer(TimestepBlock): def __init__(self, model_channels, dropout, num_heads): super().__init__() self.resblk = ResBlock( model_channels, model_channels, dropout, model_channels, dims=1, use_scale_shift_norm=True, ) self.attn = AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True) def forward(self, x, time_emb): y = self.resblk(x, time_emb) return self.attn(y) class DiffusionTts(nn.Module): def __init__( self, model_channels=512, num_layers=8, in_channels=100, in_latent_channels=512, in_tokens=8193, out_channels=200, # mean and variance dropout=0, 
use_fp16=False, num_heads=16, # Parameters for regularization. layer_drop=0.1, unconditioned_percentage=0.1, # This implements a mechanism similar to what is used in classifier-free training. ): super().__init__() self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels self.dropout = dropout self.num_heads = num_heads self.unconditioned_percentage = unconditioned_percentage self.enable_fp16 = use_fp16 self.layer_drop = layer_drop self.inp_block = nn.Conv1d(in_channels, model_channels, 3, 1, 1) self.time_embed = nn.Sequential( nn.Linear(model_channels, model_channels), nn.SiLU(), nn.Linear(model_channels, model_channels), ) # Either code_converter or latent_converter is used, depending on what type of conditioning data is fed. # This model is meant to be able to be trained on both for efficiency purposes - it is far less computationally # complex to generate tokens, while generating latents will normally mean propagating through a deep autoregressive # transformer network. self.code_embedding = nn.Embedding(in_tokens, model_channels) self.code_converter = nn.Sequential( AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), ) self.code_norm = normalization(model_channels) self.latent_conditioner = nn.Sequential( nn.Conv1d(in_latent_channels, model_channels, 3, padding=1), AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True), ) self.contextual_embedder = nn.Sequential( nn.Conv1d(in_channels, model_channels, 3, padding=1, stride=2), nn.Conv1d(model_channels, model_channels * 2, 3, padding=1, stride=2), AttentionBlock( model_channels * 2, num_heads, relative_pos_embeddings=True, do_checkpoint=False, ), AttentionBlock( model_channels * 2, num_heads, relative_pos_embeddings=True, do_checkpoint=False, ), AttentionBlock( model_channels * 2, num_heads, relative_pos_embeddings=True, do_checkpoint=False, ), AttentionBlock( model_channels * 2, num_heads, relative_pos_embeddings=True, do_checkpoint=False, ), AttentionBlock( model_channels * 2, num_heads, relative_pos_embeddings=True, do_checkpoint=False, ), ) self.unconditioned_embedding = nn.Parameter(torch.randn(1, model_channels, 1)) self.conditioning_timestep_integrator = TimestepEmbedSequential( DiffusionLayer(model_channels, dropout, num_heads), DiffusionLayer(model_channels, dropout, num_heads), DiffusionLayer(model_channels, dropout, num_heads), ) self.integrating_conv = nn.Conv1d(model_channels * 2, model_channels, kernel_size=1) self.mel_head = nn.Conv1d(model_channels, in_channels, kernel_size=3, padding=1) self.layers = nn.ModuleList( [DiffusionLayer(model_channels, dropout, num_heads) for _ in range(num_layers)] + [ ResBlock( model_channels, model_channels, dropout, dims=1, use_scale_shift_norm=True, ) for _ in range(3) ] ) self.out = nn.Sequential( normalization(model_channels), nn.SiLU(), nn.Conv1d(model_channels, out_channels, 3, padding=1), ) def get_grad_norm_parameter_groups(self): groups = { "minicoder": list(self.contextual_embedder.parameters()), "layers": list(self.layers.parameters()), "code_converters": list(self.code_embedding.parameters()) + list(self.code_converter.parameters()) + 
list(self.latent_conditioner.parameters()) + list(self.latent_conditioner.parameters()), "timestep_integrator": list(self.conditioning_timestep_integrator.parameters()) + list(self.integrating_conv.parameters()), "time_embed": list(self.time_embed.parameters()), } return groups def get_conditioning(self, conditioning_input): speech_conditioning_input = ( conditioning_input.unsqueeze(1) if len(conditioning_input.shape) == 3 else conditioning_input ) conds = [] for j in range(speech_conditioning_input.shape[1]): conds.append(self.contextual_embedder(speech_conditioning_input[:, j])) conds = torch.cat(conds, dim=-1) conds = conds.mean(dim=-1) return conds def timestep_independent( self, aligned_conditioning, conditioning_latent, expected_seq_len, return_code_pred, ): # Shuffle aligned_latent to BxCxS format if is_latent(aligned_conditioning): aligned_conditioning = aligned_conditioning.permute(0, 2, 1) cond_scale, cond_shift = torch.chunk(conditioning_latent, 2, dim=1) if is_latent(aligned_conditioning): code_emb = self.latent_conditioner(aligned_conditioning) else: code_emb = self.code_embedding(aligned_conditioning).permute(0, 2, 1) code_emb = self.code_converter(code_emb) code_emb = self.code_norm(code_emb) * (1 + cond_scale.unsqueeze(-1)) + cond_shift.unsqueeze(-1) unconditioned_batches = torch.zeros((code_emb.shape[0], 1, 1), device=code_emb.device) # Mask out the conditioning branch for whole batch elements, implementing something similar to classifier-free guidance. if self.training and self.unconditioned_percentage > 0: unconditioned_batches = ( torch.rand((code_emb.shape[0], 1, 1), device=code_emb.device) < self.unconditioned_percentage ) code_emb = torch.where( unconditioned_batches, self.unconditioned_embedding.repeat(aligned_conditioning.shape[0], 1, 1), code_emb, ) expanded_code_emb = F.interpolate(code_emb, size=expected_seq_len, mode="nearest") if not return_code_pred: return expanded_code_emb else: mel_pred = self.mel_head(expanded_code_emb) # Multiply mel_pred by !unconditioned_branches, which drops the gradient on unconditioned branches. This is because we don't want that gradient being used to train parameters through the codes_embedder as it unbalances contributions to that network from the MSE loss. mel_pred = mel_pred * unconditioned_batches.logical_not() return expanded_code_emb, mel_pred def forward( self, x, timesteps, aligned_conditioning=None, conditioning_latent=None, precomputed_aligned_embeddings=None, conditioning_free=False, return_code_pred=False, ): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param aligned_conditioning: an aligned latent or sequence of tokens providing useful data about the sample to be produced. :param conditioning_latent: a pre-computed conditioning latent; see get_conditioning(). :param precomputed_aligned_embeddings: Embeddings returned from self.timestep_independent() :param conditioning_free: When set, all conditioning inputs (including tokens and conditioning_input) will not be considered. :return: an [N x C x ...] Tensor of outputs. """ assert precomputed_aligned_embeddings is not None or ( aligned_conditioning is not None and conditioning_latent is not None ) assert not ( return_code_pred and precomputed_aligned_embeddings is not None ) # These two are mutually exclusive. 
unused_params = [] if conditioning_free: code_emb = self.unconditioned_embedding.repeat(x.shape[0], 1, x.shape[-1]) unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters())) unused_params.extend(list(self.latent_conditioner.parameters())) else: if precomputed_aligned_embeddings is not None: code_emb = precomputed_aligned_embeddings else: code_emb, mel_pred = self.timestep_independent( aligned_conditioning, conditioning_latent, x.shape[-1], True ) if is_latent(aligned_conditioning): unused_params.extend( list(self.code_converter.parameters()) + list(self.code_embedding.parameters()) ) else: unused_params.extend(list(self.latent_conditioner.parameters())) unused_params.append(self.unconditioned_embedding) time_emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) code_emb = self.conditioning_timestep_integrator(code_emb, time_emb) x = self.inp_block(x) x = torch.cat([x, code_emb], dim=1) x = self.integrating_conv(x) for i, lyr in enumerate(self.layers): # Do layer drop where applicable. Do not drop first and last layers. if ( self.training and self.layer_drop > 0 and i != 0 and i != (len(self.layers) - 1) and random.random() < self.layer_drop ): unused_params.extend(list(lyr.parameters())) else: # First and last blocks will have autocast disabled for improved precision. with autocast(x.device.type, enabled=self.enable_fp16 and i != 0): x = lyr(x, time_emb) x = x.float() out = self.out(x) # Involve probabilistic or possibly unused parameters in loss so we don't get DDP errors. extraneous_addition = 0 for p in unused_params: extraneous_addition = extraneous_addition + p.mean() out = out + extraneous_addition * 0 if return_code_pred: return out, mel_pred return out if __name__ == "__main__": clip = torch.randn(2, 100, 400) aligned_latent = torch.randn(2, 388, 512) aligned_sequence = torch.randint(0, 8192, (2, 100)) cond = torch.randn(2, 100, 400) ts = torch.LongTensor([600, 600]) model = DiffusionTts(512, layer_drop=0.3, unconditioned_percentage=0.5) # Test with latent aligned conditioning # o = model(clip, ts, aligned_latent, cond) # Test with sequence aligned conditioning o = model(clip, ts, aligned_sequence, cond)
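For reference, the sinusoidal embedding produced by timestep_embedding() above can be written compactly: with embedding dimension d, h = \lfloor d/2 \rfloor, and frequencies \omega_i = 10000^{-i/h} for i = 0, \dots, h-1, the embedding of timestep t is

  \mathrm{emb}(t) = [\cos(t\,\omega_0), \dots, \cos(t\,\omega_{h-1}), \sin(t\,\omega_0), \dots, \sin(t\,\omega_{h-1})],

with a zero appended when d is odd. This is the familiar Transformer positional encoding, except that the cosine and sine halves are concatenated block-wise rather than interleaved.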
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/linear/fstlinear.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/extensions/linear/linearscript.h> #include <fst/flags.h> DEFINE_string(arc_type, "standard", "Output arc type"); DEFINE_string(epsilon_symbol, "<eps>", "Epsilon symbol"); DEFINE_string(unknown_symbol, "<unk>", "Unknown word symbol"); DEFINE_string(vocab, "", "Path to the vocabulary file"); DEFINE_string(out, "", "Path to the output binary"); DEFINE_string(save_isymbols, "", "Save input symbol table to file"); DEFINE_string(save_fsymbols, "", "Save feature symbol table to file"); DEFINE_string(save_osymbols, "", "Save output symbol table to file"); int main(int argc, char **argv) { // TODO(wuke): more detailed usage std::set_new_handler(FailedNewHandler); SET_FLAGS(argv[0], &argc, &argv, true); fst::script::ValidateDelimiter(); fst::script::ValidateEmptySymbol(); if (argc == 1) { ShowUsage(); return 1; } fst::script::LinearCompile(FLAGS_arc_type, FLAGS_epsilon_symbol, FLAGS_unknown_symbol, FLAGS_vocab, argv + 1, argc - 1, FLAGS_out, FLAGS_save_isymbols, FLAGS_save_fsymbols, FLAGS_save_osymbols); }
0
coqui_public_repos/STT/native_client/java/app/src
coqui_public_repos/STT/native_client/java/app/src/main/AndroidManifest.xml
<?xml version="1.0" encoding="utf-8"?> <manifest xmlns:android="http://schemas.android.com/apk/res/android" package="ai.coqui.sttexampleapp"> <application android:allowBackup="true" android:icon="@mipmap/ic_launcher" android:label="@string/app_name" android:roundIcon="@mipmap/ic_launcher_round" android:supportsRtl="true" android:theme="@style/AppTheme"> <activity android:name=".STTActivity"> <intent-filter> <action android:name="android.intent.action.MAIN" /> <category android:name="android.intent.category.LAUNCHER" /> </intent-filter> </activity> </application> <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" /> </manifest>
0
coqui_public_repos/STT-examples/net_framework/STTWPF
coqui_public_repos/STT-examples/net_framework/STTWPF/Properties/Settings.Designer.cs
//------------------------------------------------------------------------------ // <auto-generated> // This code was generated by a tool. // Runtime Version:4.0.30319.42000 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. // </auto-generated> //------------------------------------------------------------------------------ namespace STT.WPF.Properties { [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "15.9.0.0")] internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase { private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings()))); public static Settings Default { get { return defaultInstance; } } } }
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-python_38_8k-linux-amd64-prod_pbmodel-opt.yml
build: template_file: test-linux-opt-base.tyml dependencies: - "linux-amd64-cpu-opt" args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python-tests-prod.sh 3.8.1: 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 CPU Python v3.8 prod tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on Python v3.8 on prod model, CPU only, optimized version (8kHz)"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/lib/symbol-table.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Classes to provide symbol-to-integer and integer-to-symbol mappings. #include <fst/symbol-table.h> #include <fst/flags.h> #include <fst/log.h> #include <fstream> #include <fst/util.h> DEFINE_bool(fst_compat_symbols, true, "Require symbol tables to match when appropriate"); DEFINE_string(fst_field_separator, "\t ", "Set of characters used as a separator between printed fields"); namespace fst { // Maximum line length in textual symbols file. static constexpr int kLineLen = 8096; // Identifies stream data as a symbol table (and its endianity). static constexpr int32 kSymbolTableMagicNumber = 2125658996; SymbolTableTextOptions::SymbolTableTextOptions(bool allow_negative_labels) : allow_negative_labels(allow_negative_labels), fst_field_separator(FLAGS_fst_field_separator) {} namespace internal { SymbolTableImpl *SymbolTableImpl::ReadText(std::istream &strm, const string &filename, const SymbolTableTextOptions &opts) { std::unique_ptr<SymbolTableImpl> impl(new SymbolTableImpl(filename)); int64 nline = 0; char line[kLineLen]; while (!strm.getline(line, kLineLen).fail()) { ++nline; std::vector<char *> col; auto separator = opts.fst_field_separator + "\n"; SplitString(line, separator.c_str(), &col, true); if (col.empty()) continue; // Empty line. if (col.size() != 2) { LOG(ERROR) << "SymbolTable::ReadText: Bad number of columns (" << col.size() << "), " << "file = " << filename << ", line = " << nline << ":<" << line << ">"; return nullptr; } const char *symbol = col[0]; const char *value = col[1]; char *p; const auto key = strtoll(value, &p, 10); if (p < value + strlen(value) || (!opts.allow_negative_labels && key < 0) || key == kNoSymbol) { LOG(ERROR) << "SymbolTable::ReadText: Bad non-negative integer \"" << value << "\", " << "file = " << filename << ", line = " << nline; return nullptr; } impl->AddSymbol(symbol, key); } return impl.release(); } void SymbolTableImpl::MaybeRecomputeCheckSum() const { { ReaderMutexLock check_sum_lock(&check_sum_mutex_); if (check_sum_finalized_) return; } // We'll acquire an exclusive lock to recompute the checksums. MutexLock check_sum_lock(&check_sum_mutex_); if (check_sum_finalized_) { // Another thread (coming in around the same time return; // might have done it already). So we recheck. } // Calculates the original label-agnostic checksum. CheckSummer check_sum; for (size_t i = 0; i < symbols_.size(); ++i) { const auto &symbol = symbols_.GetSymbol(i); check_sum.Update(symbol.data(), symbol.size()); check_sum.Update("", 1); } check_sum_string_ = check_sum.Digest(); // Calculates the safer, label-dependent checksum. CheckSummer labeled_check_sum; for (int64 i = 0; i < dense_key_limit_; ++i) { std::ostringstream line; line << symbols_.GetSymbol(i) << '\t' << i; labeled_check_sum.Update(line.str().data(), line.str().size()); } using citer = map<int64, int64>::const_iterator; for (citer it = key_map_.begin(); it != key_map_.end(); ++it) { // TODO(tombagby, 2013-11-22) This line maintains a bug that ignores // negative labels in the checksum that too many tests rely on. 
if (it->first < dense_key_limit_) continue; std::ostringstream line; line << symbols_.GetSymbol(it->second) << '\t' << it->first; labeled_check_sum.Update(line.str().data(), line.str().size()); } labeled_check_sum_string_ = labeled_check_sum.Digest(); check_sum_finalized_ = true; } int64 SymbolTableImpl::AddSymbol(const string &symbol, int64 key) { if (key == kNoSymbol) return key; const std::pair<int64, bool> &insert_key = symbols_.InsertOrFind(symbol); if (!insert_key.second) { auto key_already = GetNthKey(insert_key.first); if (key_already == key) return key; VLOG(1) << "SymbolTable::AddSymbol: symbol = " << symbol << " already in symbol_map_ with key = " << key_already << " but supplied new key = " << key << " (ignoring new key)"; return key_already; } if (key == (symbols_.size() - 1) && key == dense_key_limit_) { ++dense_key_limit_; } else { idx_key_.push_back(key); key_map_[key] = symbols_.size() - 1; } if (key >= available_key_) available_key_ = key + 1; check_sum_finalized_ = false; return key; } // TODO(rybach): Consider a more efficient implementation which re-uses holes in // the dense-key range or re-arranges the dense-key range from time to time. void SymbolTableImpl::RemoveSymbol(const int64 key) { auto idx = key; if (key < 0 || key >= dense_key_limit_) { auto iter = key_map_.find(key); if (iter == key_map_.end()) return; idx = iter->second; key_map_.erase(iter); } if (idx < 0 || idx >= symbols_.size()) return; symbols_.RemoveSymbol(idx); // Removed one symbol, all indexes > idx are shifted by -1. for (auto &k : key_map_) { if (k.second > idx) --k.second; } if (key >= 0 && key < dense_key_limit_) { // Removal puts a hole in the dense key range. Adjusts range to [0, key). const auto new_dense_key_limit = key; for (int64 i = key + 1; i < dense_key_limit_; ++i) { key_map_[i] = i - 1; } // Moves existing values in idx_key to new place. idx_key_.resize(symbols_.size() - new_dense_key_limit); for (int64 i = symbols_.size(); i >= dense_key_limit_; --i) { idx_key_[i - new_dense_key_limit - 1] = idx_key_[i - dense_key_limit_]; } // Adds indexes for previously dense keys. for (int64 i = new_dense_key_limit; i < dense_key_limit_ - 1; ++i) { idx_key_[i - new_dense_key_limit] = i + 1; } dense_key_limit_ = new_dense_key_limit; } else { // Remove entry for removed index in idx_key. 
for (int64 i = idx - dense_key_limit_; i < idx_key_.size() - 1; ++i) { idx_key_[i] = idx_key_[i + 1]; } idx_key_.pop_back(); } if (key == available_key_ - 1) available_key_ = key; } SymbolTableImpl *SymbolTableImpl::Read(std::istream &strm, const SymbolTableReadOptions &opts) { int32 magic_number = 0; ReadType(strm, &magic_number); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Read: Read failed"; return nullptr; } string name; ReadType(strm, &name); std::unique_ptr<SymbolTableImpl> impl(new SymbolTableImpl(name)); ReadType(strm, &impl->available_key_); int64 size; ReadType(strm, &size); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Read: Read failed"; return nullptr; } string symbol; int64 key; impl->check_sum_finalized_ = false; for (int64 i = 0; i < size; ++i) { ReadType(strm, &symbol); ReadType(strm, &key); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Read: Read failed"; return nullptr; } impl->AddSymbol(symbol, key); } return impl.release(); } bool SymbolTableImpl::Write(std::ostream &strm) const { WriteType(strm, kSymbolTableMagicNumber); WriteType(strm, name_); WriteType(strm, available_key_); int64 size = symbols_.size(); WriteType(strm, size); for (int64 i = 0; i < size; ++i) { auto key = (i < dense_key_limit_) ? i : idx_key_[i - dense_key_limit_]; WriteType(strm, symbols_.GetSymbol(i)); WriteType(strm, key); } strm.flush(); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Write: Write failed"; return false; } return true; } } // namespace internal void SymbolTable::AddTable(const SymbolTable &table) { MutateCheck(); for (SymbolTableIterator iter(table); !iter.Done(); iter.Next()) { impl_->AddSymbol(iter.Symbol()); } } bool SymbolTable::WriteText(std::ostream &strm, const SymbolTableTextOptions &opts) const { if (opts.fst_field_separator.empty()) { LOG(ERROR) << "Missing required field separator"; return false; } bool once_only = false; for (SymbolTableIterator iter(*this); !iter.Done(); iter.Next()) { std::ostringstream line; if (iter.Value() < 0 && !opts.allow_negative_labels && !once_only) { LOG(WARNING) << "Negative symbol table entry when not allowed"; once_only = true; } line << iter.Symbol() << opts.fst_field_separator[0] << iter.Value() << '\n'; strm.write(line.str().data(), line.str().length()); } return true; } namespace internal { DenseSymbolMap::DenseSymbolMap() : empty_(-1), buckets_(1 << 4), hash_mask_(buckets_.size() - 1) { std::uninitialized_fill(buckets_.begin(), buckets_.end(), empty_); } DenseSymbolMap::DenseSymbolMap(const DenseSymbolMap &x) : empty_(-1), symbols_(x.symbols_.size()), buckets_(x.buckets_), hash_mask_(x.hash_mask_) { for (size_t i = 0; i < symbols_.size(); ++i) { const auto sz = strlen(x.symbols_[i]) + 1; auto *cpy = new char[sz]; memcpy(cpy, x.symbols_[i], sz); symbols_[i] = cpy; } } DenseSymbolMap::~DenseSymbolMap() { for (size_t i = 0; i < symbols_.size(); ++i) { delete[] symbols_[i]; } } std::pair<int64, bool> DenseSymbolMap::InsertOrFind(const string &key) { static constexpr float kMaxOccupancyRatio = 0.75; // Grows when 75% full. 
if (symbols_.size() >= kMaxOccupancyRatio * buckets_.size()) { Rehash(buckets_.size() * 2); } size_t idx = str_hash_(key) & hash_mask_; while (buckets_[idx] != empty_) { const auto stored_value = buckets_[idx]; if (!strcmp(symbols_[stored_value], key.c_str())) { return {stored_value, false}; } idx = (idx + 1) & hash_mask_; } auto next = symbols_.size(); buckets_[idx] = next; symbols_.push_back(NewSymbol(key)); return {next, true}; } int64 DenseSymbolMap::Find(const string &key) const { size_t idx = str_hash_(key) & hash_mask_; while (buckets_[idx] != empty_) { const auto stored_value = buckets_[idx]; if (!strcmp(symbols_[stored_value], key.c_str())) { return stored_value; } idx = (idx + 1) & hash_mask_; } return buckets_[idx]; } void DenseSymbolMap::Rehash(size_t num_buckets) { buckets_.resize(num_buckets); hash_mask_ = buckets_.size() - 1; std::uninitialized_fill(buckets_.begin(), buckets_.end(), empty_); for (size_t i = 0; i < symbols_.size(); ++i) { size_t idx = str_hash_(string(symbols_[i])) & hash_mask_; while (buckets_[idx] != empty_) { idx = (idx + 1) & hash_mask_; } buckets_[idx] = i; } } const char *DenseSymbolMap::NewSymbol(const string &sym) { auto num = sym.size() + 1; auto newstr = new char[num]; memcpy(newstr, sym.c_str(), num); return newstr; } void DenseSymbolMap::RemoveSymbol(size_t idx) { delete[] symbols_[idx]; symbols_.erase(symbols_.begin() + idx); Rehash(buckets_.size()); } } // namespace internal bool CompatSymbols(const SymbolTable *syms1, const SymbolTable *syms2, bool warning) { // Flag can explicitly override this check. if (!FLAGS_fst_compat_symbols) return true; if (syms1 && syms2 && (syms1->LabeledCheckSum() != syms2->LabeledCheckSum())) { if (warning) { LOG(WARNING) << "CompatSymbols: Symbol table checksums do not match. " << "Table sizes are " << syms1->NumSymbols() << " and " << syms2->NumSymbols(); } return false; } else { return true; } } void SymbolTableToString(const SymbolTable *table, string *result) { std::ostringstream ostrm; table->Write(ostrm); *result = ostrm.str(); } SymbolTable *StringToSymbolTable(const string &str) { std::istringstream istrm(str); return SymbolTable::Read(istrm, SymbolTableReadOptions()); } } // namespace fst
0
coqui_public_repos/STT/native_client/dotnet
coqui_public_repos/STT/native_client/dotnet/STTClient/STTClient.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Library</OutputType> <TargetFrameworks>netstandard2.0;netstandard2.1;net5.0;net6.0</TargetFrameworks> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> </PropertyGroup> </Project>
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/tts_tests2/test_fast_pitch_speaker_emb_train.py
import glob import json import os import shutil from trainer import get_last_checkpoint from tests import get_device_id, get_tests_output_path, run_cli from TTS.config.shared_configs import BaseAudioConfig from TTS.tts.configs.fast_pitch_config import FastPitchConfig config_path = os.path.join(get_tests_output_path(), "fast_pitch_speaker_emb_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") audio_config = BaseAudioConfig( sample_rate=22050, do_trim_silence=True, trim_db=60.0, signal_norm=False, mel_fmin=0.0, mel_fmax=8000, spec_gain=1.0, log_func="np.log", ref_level_db=20, preemphasis=0.0, ) config = FastPitchConfig( audio=audio_config, batch_size=8, eval_batch_size=8, num_loader_workers=0, num_eval_loader_workers=0, text_cleaner="english_cleaners", use_phonemes=True, phoneme_language="en-us", phoneme_cache_path="tests/data/ljspeech/phoneme_cache/", f0_cache_path="tests/data/ljspeech/f0_cache/", run_eval=True, test_delay_epochs=-1, epochs=1, print_step=1, print_eval=True, use_speaker_embedding=True, test_sentences=[ "Be a voice, not an echo.", ], ) config.audio.do_trim_silence = True config.use_speaker_embedding = True config.model_args.use_speaker_embedding = True config.audio.trim_db = 60 config.save_json(config_path) # train the model for one epoch command_train = ( f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} " f"--coqpit.output_path {output_path} " "--coqpit.datasets.0.formatter ljspeech_test " "--coqpit.datasets.0.meta_file_train metadata.csv " "--coqpit.datasets.0.meta_file_val metadata.csv " "--coqpit.datasets.0.path tests/data/ljspeech " "--coqpit.datasets.0.meta_file_attn_mask tests/data/ljspeech/metadata_attn_mask.txt " "--coqpit.test_delay_epochs 0" ) run_cli(command_train) # Find latest folder continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) # Inference using TTS API continue_config_path = os.path.join(continue_path, "config.json") continue_restore_path, _ = get_last_checkpoint(continue_path) out_wav_path = os.path.join(get_tests_output_path(), "output.wav") speaker_id = "ljspeech-1" continue_speakers_path = os.path.join(continue_path, "speakers.json") # Check integrity of the config with open(continue_config_path, "r", encoding="utf-8") as f: config_loaded = json.load(f) assert config_loaded["characters"] is not None assert config_loaded["output_path"] in continue_path assert config_loaded["test_delay_epochs"] == 0 # Load the model and run inference inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}" run_cli(inference_command) # restore the model and continue training for one more epoch command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} " run_cli(command_train) shutil.rmtree(continue_path)
0
coqui_public_repos/TTS/TTS/demos
coqui_public_repos/TTS/TTS/demos/xtts_ft_demo/requirements.txt
faster_whisper==0.9.0 gradio==4.7.1
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/union-weight.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Union weight set and associated semiring operation definitions. // // TODO(riley): add in normalizer functor #ifndef FST_UNION_WEIGHT_H_ #define FST_UNION_WEIGHT_H_ #include <cstdlib> #include <iostream> #include <list> #include <sstream> #include <string> #include <utility> #include <fst/weight.h> namespace fst { // Example UnionWeightOptions for UnionWeight template below. The Merge // operation is used to collapse elements of the set and the Compare function // to efficiently implement the merge. In the simplest case, merge would just // apply with equality of set elements so the result is a set (and not a // multiset). More generally, this can be used to maintain the multiplicity or // other such weight associated with the set elements (cf. Gallic weights). // template <class W> // struct UnionWeightOptions { // // Comparison function C is a total order on W that is monotonic w.r.t. to // // Times: for all a, b,c != Zero(): C(a, b) => C(ca, cb) and is // // anti-monotonic w.r.rt to Divide: C(a, b) => C(c/b, c/a). // // // // For all a, b: only one of C(a, b), C(b, a) or a ~ b must true where // // ~ is an equivalence relation on W. Also we require a ~ b iff // // a.Reverse() ~ b.Reverse(). // using Compare = NaturalLess<W>; // // // How to combine two weights if a ~ b as above. For all a, b: a ~ b => // // merge(a, b) ~ a, Merge must define a semiring endomorphism from the // // unmerged weight sets to the merged weight sets. // struct Merge { // W operator()(const W &w1, const W &w2) const { return w1; } // }; // // // For ReverseWeight. // using ReverseOptions = UnionWeightOptions<ReverseWeight>; // }; template <class W, class O> class UnionWeight; template <class W, class O> class UnionWeightIterator; template <class W, class O> class UnionWeightReverseIterator; template <class W, class O> bool operator==(const UnionWeight<W, O> &, const UnionWeight<W, O> &); // Semiring that uses Times() and One() from W and union and the empty set // for Plus() and Zero(), respectively. Template argument O specifies the union // weight options as above. template <class W, class O> class UnionWeight { public: using Weight = W; using Compare = typename O::Compare; using Merge = typename O::Merge; using ReverseWeight = UnionWeight<typename W::ReverseWeight, typename O::ReverseOptions>; friend class UnionWeightIterator<W, O>; friend class UnionWeightReverseIterator<W, O>; friend bool operator== <>(const UnionWeight<W, O> &, const UnionWeight<W, O> &); // Sets represented as first_ weight + rest_ weights. Uses first_ as // NoWeight() to indicate the union weight Zero() ask the empty set. Uses // rest_ containing NoWeight() to indicate the union weight NoWeight(). 
UnionWeight() : first_(W::NoWeight()) {} explicit UnionWeight(W weight) : first_(weight) { if (weight == W::NoWeight()) rest_.push_back(weight); } static const UnionWeight<W, O> &Zero() { static const UnionWeight<W, O> zero(W::NoWeight()); return zero; } static const UnionWeight<W, O> &One() { static const UnionWeight<W, O> one(W::One()); return one; } static const UnionWeight<W, O> &NoWeight() { static const UnionWeight<W, O> no_weight(W::Zero(), W::NoWeight()); return no_weight; } static const string &Type() { static const string *const type = new string(W::Type() + "_union"); return *type; } static constexpr uint64 Properties() { return W::Properties() & (kLeftSemiring | kRightSemiring | kCommutative | kIdempotent); } bool Member() const; std::istream &Read(std::istream &strm); std::ostream &Write(std::ostream &strm) const; size_t Hash() const; UnionWeight<W, O> Quantize(float delta = kDelta) const; ReverseWeight Reverse() const; // These operations combined with the UnionWeightIterator and // UnionWeightReverseIterator provide the access and mutation of the union // weight internal elements. // Common initializer among constructors; clears existing UnionWeight. void Clear() { first_ = W::NoWeight(); rest_.clear(); } size_t Size() const { return first_.Member() ? rest_.size() + 1 : 0; } const W &Back() const { return rest_.empty() ? first_ : rest_.back(); } // When srt is true, assumes elements added sorted w.r.t Compare and merging // of weights performed as needed. Otherwise, just ensures first_ is the // least element wrt Compare. void PushBack(W weight, bool srt); // Sorts the elements of the set. Assumes that first_, if present, is the // least element. void Sort() { rest_.sort(comp_); } private: W &Back() { if (rest_.empty()) { return first_; } else { return rest_.back(); } } UnionWeight(W w1, W w2) : first_(std::move(w1)), rest_(1, std::move(w2)) {} W first_; // First weight in set. std::list<W> rest_; // Remaining weights in set. Compare comp_; Merge merge_; }; template <class W, class O> void UnionWeight<W, O>::PushBack(W weight, bool srt) { if (!weight.Member()) { rest_.push_back(std::move(weight)); } else if (!first_.Member()) { first_ = std::move(weight); } else if (srt) { auto &back = Back(); if (comp_(back, weight)) { rest_.push_back(std::move(weight)); } else { back = merge_(back, std::move(weight)); } } else { if (comp_(first_, weight)) { rest_.push_back(std::move(weight)); } else { rest_.push_back(first_); first_ = std::move(weight); } } } // Traverses union weight in the forward direction. template <class W, class O> class UnionWeightIterator { public: explicit UnionWeightIterator(const UnionWeight<W, O> &weight) : first_(weight.first_), rest_(weight.rest_), init_(true), it_(rest_.begin()) {} bool Done() const { return init_ ? !first_.Member() : it_ == rest_.end(); } const W &Value() const { return init_ ? first_ : *it_; } void Next() { if (init_) { init_ = false; } else { ++it_; } } void Reset() { init_ = true; it_ = rest_.begin(); } private: const W &first_; const std::list<W> &rest_; bool init_; // in the initialized state? typename std::list<W>::const_iterator it_; }; // Traverses union weight in backward direction. template <typename L, class O> class UnionWeightReverseIterator { public: explicit UnionWeightReverseIterator(const UnionWeight<L, O> &weight) : first_(weight.first_), rest_(weight.rest_), fin_(!first_.Member()), it_(rest_.rbegin()) {} bool Done() const { return fin_; } const L &Value() const { return it_ == rest_.rend() ? 
first_ : *it_; } void Next() { if (it_ == rest_.rend()) { fin_ = true; } else { ++it_; } } void Reset() { fin_ = !first_.Member(); it_ = rest_.rbegin(); } private: const L &first_; const std::list<L> &rest_; bool fin_; // in the final state? typename std::list<L>::const_reverse_iterator it_; }; // UnionWeight member functions follow that require UnionWeightIterator. template <class W, class O> inline std::istream &UnionWeight<W, O>::Read(std::istream &istrm) { Clear(); int32 size; ReadType(istrm, &size); for (int i = 0; i < size; ++i) { W weight; ReadType(istrm, &weight); PushBack(weight, true); } return istrm; } template <class W, class O> inline std::ostream &UnionWeight<W, O>::Write(std::ostream &ostrm) const { const int32 size = Size(); WriteType(ostrm, size); for (UnionWeightIterator<W, O> it(*this); !it.Done(); it.Next()) { WriteType(ostrm, it.Value()); } return ostrm; } template <class W, class O> inline bool UnionWeight<W, O>::Member() const { if (Size() <= 1) return true; for (UnionWeightIterator<W, O> it(*this); !it.Done(); it.Next()) { if (!it.Value().Member()) return false; } return true; } template <class W, class O> inline UnionWeight<W, O> UnionWeight<W, O>::Quantize(float delta) const { UnionWeight<W, O> weight; for (UnionWeightIterator<W, O> it(*this); !it.Done(); it.Next()) { weight.PushBack(it.Value().Quantize(delta), true); } return weight; } template <class W, class O> inline typename UnionWeight<W, O>::ReverseWeight UnionWeight<W, O>::Reverse() const { ReverseWeight weight; for (UnionWeightIterator<W, O> it(*this); !it.Done(); it.Next()) { weight.PushBack(it.Value().Reverse(), false); } weight.Sort(); return weight; } template <class W, class O> inline size_t UnionWeight<W, O>::Hash() const { size_t h = 0; static constexpr int lshift = 5; static constexpr int rshift = CHAR_BIT * sizeof(size_t) - lshift; for (UnionWeightIterator<W, O> it(*this); !it.Done(); it.Next()) { h = h << lshift ^ h >> rshift ^ it.Value().Hash(); } return h; } // Requires union weight has been canonicalized. template <class W, class O> inline bool operator==(const UnionWeight<W, O> &w1, const UnionWeight<W, O> &w2) { if (w1.Size() != w2.Size()) return false; UnionWeightIterator<W, O> it1(w1); UnionWeightIterator<W, O> it2(w2); for (; !it1.Done(); it1.Next(), it2.Next()) { if (it1.Value() != it2.Value()) return false; } return true; } // Requires union weight has been canonicalized. template <class W, class O> inline bool operator!=(const UnionWeight<W, O> &w1, const UnionWeight<W, O> &w2) { return !(w1 == w2); } // Requires union weight has been canonicalized. 
template <class W, class O> inline bool ApproxEqual(const UnionWeight<W, O> &w1, const UnionWeight<W, O> &w2, float delta = kDelta) { if (w1.Size() != w2.Size()) return false; UnionWeightIterator<W, O> it1(w1); UnionWeightIterator<W, O> it2(w2); for (; !it1.Done(); it1.Next(), it2.Next()) { if (!ApproxEqual(it1.Value(), it2.Value(), delta)) return false; } return true; } template <class W, class O> inline std::ostream &operator<<(std::ostream &ostrm, const UnionWeight<W, O> &weight) { UnionWeightIterator<W, O> it(weight); if (it.Done()) { return ostrm << "EmptySet"; } else if (!weight.Member()) { return ostrm << "BadSet"; } else { CompositeWeightWriter writer(ostrm); writer.WriteBegin(); for (; !it.Done(); it.Next()) writer.WriteElement(it.Value()); writer.WriteEnd(); } return ostrm; } template <class W, class O> inline std::istream &operator>>(std::istream &istrm, UnionWeight<W, O> &weight) { string s; istrm >> s; if (s == "EmptySet") { weight = UnionWeight<W, O>::Zero(); } else if (s == "BadSet") { weight = UnionWeight<W, O>::NoWeight(); } else { weight = UnionWeight<W, O>::Zero(); std::istringstream sstrm(s); CompositeWeightReader reader(sstrm); reader.ReadBegin(); bool more = true; while (more) { W v; more = reader.ReadElement(&v); weight.PushBack(v, true); } reader.ReadEnd(); } return istrm; } template <class W, class O> inline UnionWeight<W, O> Plus(const UnionWeight<W, O> &w1, const UnionWeight<W, O> &w2) { if (!w1.Member() || !w2.Member()) return UnionWeight<W, O>::NoWeight(); if (w1 == UnionWeight<W, O>::Zero()) return w2; if (w2 == UnionWeight<W, O>::Zero()) return w1; UnionWeightIterator<W, O> it1(w1); UnionWeightIterator<W, O> it2(w2); UnionWeight<W, O> sum; typename O::Compare comp; while (!it1.Done() && !it2.Done()) { const auto v1 = it1.Value(); const auto v2 = it2.Value(); if (comp(v1, v2)) { sum.PushBack(v1, true); it1.Next(); } else { sum.PushBack(v2, true); it2.Next(); } } for (; !it1.Done(); it1.Next()) sum.PushBack(it1.Value(), true); for (; !it2.Done(); it2.Next()) sum.PushBack(it2.Value(), true); return sum; } template <class W, class O> inline UnionWeight<W, O> Times(const UnionWeight<W, O> &w1, const UnionWeight<W, O> &w2) { if (!w1.Member() || !w2.Member()) return UnionWeight<W, O>::NoWeight(); if (w1 == UnionWeight<W, O>::Zero() || w2 == UnionWeight<W, O>::Zero()) { return UnionWeight<W, O>::Zero(); } UnionWeightIterator<W, O> it1(w1); UnionWeightIterator<W, O> it2(w2); UnionWeight<W, O> prod1; for (; !it1.Done(); it1.Next()) { UnionWeight<W, O> prod2; for (; !it2.Done(); it2.Next()) { prod2.PushBack(Times(it1.Value(), it2.Value()), true); } prod1 = Plus(prod1, prod2); it2.Reset(); } return prod1; } template <class W, class O> inline UnionWeight<W, O> Divide(const UnionWeight<W, O> &w1, const UnionWeight<W, O> &w2, DivideType typ) { if (!w1.Member() || !w2.Member()) return UnionWeight<W, O>::NoWeight(); if (w1 == UnionWeight<W, O>::Zero() || w2 == UnionWeight<W, O>::Zero()) { return UnionWeight<W, O>::Zero(); } UnionWeightIterator<W, O> it1(w1); UnionWeightReverseIterator<W, O> it2(w2); UnionWeight<W, O> quot; if (w1.Size() == 1) { for (; !it2.Done(); it2.Next()) { quot.PushBack(Divide(it1.Value(), it2.Value(), typ), true); } } else if (w2.Size() == 1) { for (; !it1.Done(); it1.Next()) { quot.PushBack(Divide(it1.Value(), it2.Value(), typ), true); } } else { quot = UnionWeight<W, O>::NoWeight(); } return quot; } // This function object generates weights over the union of weights for the // underlying generators for the template weight types. 
This is intended // primarily for testing. template <class W, class O> class WeightGenerate<UnionWeight<W, O>> { public: using Weight = UnionWeight<W, O>; using Generate = WeightGenerate<W>; explicit WeightGenerate(bool allow_zero = true, size_t num_random_weights = kNumRandomWeights) : generate_(false), allow_zero_(allow_zero), num_random_weights_(num_random_weights) {} Weight operator()() const { const int n = rand() % (num_random_weights_ + 1); // NOLINT if (allow_zero_ && n == num_random_weights_) { return Weight::Zero(); } else if (n % 2 == 0) { return Weight(generate_()); } else { return Plus(Weight(generate_()), Weight(generate_())); } } private: Generate generate_; // Permits Zero() and zero divisors. bool allow_zero_; // The number of alternative random weights. const size_t num_random_weights_; }; } // namespace fst #endif // FST_UNION_WEIGHT_H_
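// Worked example (informal illustration, not code from this library): take
// W = TropicalWeight together with the UnionWeightOptions sketched at the top
// of this file. Then
//   Plus({1}, {2})     = {1, 2}   (sorted union; Zero() is the empty set)
//   Times({1, 2}, {3}) = {4, 5}   (element-wise cross product; tropical Times is +)
// which simply instantiates the definitions above.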
0
coqui_public_repos/TTS/TTS/tts/utils
coqui_public_repos/TTS/TTS/tts/utils/text/__init__.py
from TTS.tts.utils.text.tokenizer import TTSTokenizer
0
coqui_public_repos/STT
coqui_public_repos/STT/native_client/tflitemodelstate.h
#ifndef TFLITEMODELSTATE_H #define TFLITEMODELSTATE_H #include <memory> #include <vector> #include "tensorflow/lite/model.h" #include "modelstate.h" struct TFLiteModelState : public ModelState { std::unique_ptr<tflite::Interpreter> interpreter_; std::unique_ptr<tflite::FlatBufferModel> fbmodel_; int input_node_idx_; int previous_state_c_idx_; int previous_state_h_idx_; int input_samples_idx_; int logits_idx_; int new_state_c_idx_; int new_state_h_idx_; int mfccs_idx_; std::vector<int> acoustic_exec_plan_; std::vector<int> mfcc_exec_plan_; TFLiteModelState(); virtual ~TFLiteModelState(); virtual int init(const char* model_string, bool init_from_bytes, size_t bufferSize) override; virtual void compute_mfcc(const std::vector<float>& audio_buffer, std::vector<float>& mfcc_output) override; virtual void infer(const std::vector<float>& mfcc, unsigned int n_frames, const std::vector<float>& previous_state_c, const std::vector<float>& previous_state_h, std::vector<float>& logits_output, std::vector<float>& state_c_output, std::vector<float>& state_h_output) override; private: int get_tensor_by_name(const std::vector<int>& list, const char* name); int get_input_tensor_by_name(const char* name); int get_output_tensor_by_name(const char* name); std::vector<int> find_parent_node_ids(int tensor_id); void copy_vector_to_tensor(const std::vector<float>& vec, int tensor_idx, int num_elements); void copy_tensor_to_vector(int tensor_idx, int num_elements, std::vector<float>& vec); }; #endif // TFLITEMODELSTATE_H
0
coqui_public_repos/TTS/docs
coqui_public_repos/TTS/docs/source/faq.md
# Humble FAQ
We tried to collect common issues and questions we receive about 🐸TTS. It is worth checking before going deeper.

## Errors with a pre-trained model. How can I resolve this?
- Make sure you use the right commit version of 🐸TTS. Each pre-trained model has a corresponding version that needs to be used. It is defined in the model table.
- If it is still problematic, post your problem on [Discussions](https://github.com/coqui-ai/TTS/discussions). Please give as many details as possible (error message, your TTS version, your TTS model and config.json etc.)
- If you feel like it's a bug to be fixed, then prefer Github issues with the same level of detail.

## What are the requirements of a good 🐸TTS dataset?
* {ref}`See this page <what_makes_a_good_dataset>`

## How should I choose the right model?
- First, train Tacotron. It is smaller and faster to experiment with. If it performs poorly, try Tacotron2.
- Tacotron models produce the most natural voice if your dataset is not too noisy.
- If neither model performs well and, in particular, the attention does not align, then try AlignTTS or GlowTTS.
- If you need faster models, consider SpeedySpeech, GlowTTS or AlignTTS. Keep in mind that SpeedySpeech requires a pre-trained Tacotron or Tacotron2 model to compute text-to-speech alignments.

## How can I train my own `tts` model?
0. Check your dataset with notebooks in the [dataset_analysis](https://github.com/coqui-ai/TTS/tree/master/notebooks/dataset_analysis) folder. Use [this notebook](https://github.com/coqui-ai/TTS/blob/master/notebooks/dataset_analysis/CheckSpectrograms.ipynb) to find the right audio processing parameters. A better set of parameters results in better audio synthesis.
1. Write your own dataset `formatter` in `datasets/formatters.py` or format your dataset as one of the supported datasets, like LJSpeech. A `formatter` parses the metadata file and converts it into a list of training samples.
2. If you have a dataset with a different alphabet than English, you need to set your own character list in the ```config.json```.
    - If you use phonemes for training and your language is supported [here](https://github.com/rhasspy/gruut#supported-languages), you don't need to set your character list.
    - You can use `TTS/bin/find_unique_chars.py` to get the characters used in your dataset.
3. Write your own text cleaner in ```utils.text.cleaners```. It is not necessary unless you have a different alphabet or language-specific requirements.
    - A `cleaner` performs number and abbreviation expansion and text normalization. Basically, it converts the written text to its spoken format.
    - If you want to keep it simple, you can try using ```basic_cleaners```.
4. Fill in a ```config.json```. Go over each parameter one by one and consider it with the help of the accompanying explanation.
    - Check the `Coqpit` class created for your target model. Coqpit classes for `tts` models are under `TTS/tts/configs/`.
    - You just need to define the fields you need/want to change in your `config.json`. For the rest, their default values are used.
    - 'sample_rate', 'phoneme_language' (if phonemes are enabled), 'output_path', 'datasets', 'text_cleaner' are the fields you need to edit in most cases.
    - Here is a sample `config.json` for training a `GlowTTS` network.
```json
{
    "model": "glow_tts",
    "batch_size": 32,
    "eval_batch_size": 16,
    "num_loader_workers": 4,
    "num_eval_loader_workers": 4,
    "run_eval": true,
    "test_delay_epochs": -1,
    "epochs": 1000,
    "text_cleaner": "english_cleaners",
    "use_phonemes": false,
    "phoneme_language": "en-us",
    "phoneme_cache_path": "phoneme_cache",
    "print_step": 25,
    "print_eval": true,
    "mixed_precision": false,
    "output_path": "recipes/ljspeech/glow_tts/",
    "test_sentences": ["Test this sentence.", "This test sentence.", "Sentence this test."],
    "datasets": [{"formatter": "ljspeech", "meta_file_train": "metadata.csv", "path": "recipes/ljspeech/LJSpeech-1.1/"}]
}
```

5. Train your model.
    - SingleGPU training: ```CUDA_VISIBLE_DEVICES="0" python train_tts.py --config_path config.json```
    - MultiGPU training: ```python3 -m trainer.distribute --gpus "0,1" --script TTS/bin/train_tts.py --config_path config.json```

**Note:** You can also train your model using pure 🐍 python. Check ```{eval-rst} :ref: 'tutorial_for_nervous_beginners'```.

## How can I train in a different language?
- Check steps 2, 3, 4, 5 above.

## How can I train multi-GPUs?
- Check step 5 above.

## How can I check model performance?
- You can inspect model training and performance using ```tensorboard```. It will show you loss, attention alignment, model output. Follow the order below to measure model performance.
1. Check ground truth spectrograms. If they do not look as they are supposed to, then check the audio processing parameters in ```config.json```.
2. Check train and eval losses and make sure that they all decrease smoothly in time.
3. Check model spectrograms. In particular, training outputs should look similar to ground truth spectrograms after ~10K iterations.
4. Your model would not work well at test time until the attention has a near diagonal alignment. This is the sublime art of TTS training.
    - Attention should converge diagonally after ~50K iterations.
    - If attention does not converge, the likely causes are:
        - Your dataset is too noisy or small.
        - Samples are too long.
        - Batch size is too small (a batch size smaller than 32 tends to have a hard time converging).
    - You can also try other attention algorithms like 'graves', 'bidirectional_decoder', 'forward_attn'.
        - 'bidirectional_decoder' is your ultimate savior, but it trains 2x slower and demands 1.5x more GPU memory.
    - You can also try other models like AlignTTS or GlowTTS.

## How do I know when to stop training?
There is no single objective metric to decide the end of training since voice quality is a subjective matter. In our model trainings, we follow these steps:
- Check test-time audio outputs and see whether they stop improving.
- Check test-time attention maps and see whether they look clear and diagonal.
- Check the validation loss to see whether it has converged, is still decreasing smoothly, or has started to go up (overfitting).
- If the answer is YES for all of the above, then test the model with a set of complex sentences. For English, you can use the `TestAttention` notebook.

Keep in mind that the approach above only validates model robustness. It is hard to estimate voice quality without asking actual people. The best approach is to pick a set of promising models and run a Mean-Opinion-Score study asking actual people to score the models.

## My model does not learn. How can I debug?
- Go over the steps under "How can I check model performance?"

## Attention does not align. How can I make it work?
- Check the 4th step under "How can I check model performance?"

## How can I test a trained model?
- The best way is to use the `tts` or `tts-server` commands. For details check {ref}`here <synthesizing_speech>`.
- If you need more control, you can code your own script using the ```TTS.utils.synthesizer.Synthesizer``` class; a minimal sketch is given at the end of this page.

## My Tacotron model does not stop - I see "Decoder stopped with 'max_decoder_steps'" - Stopnet does not work.
- In general, all of the above relates to the `stopnet`. It is the part of the model telling the `decoder` when to stop.
- In general, a poor `stopnet` points to something else that is broken in your model or dataset. Especially the attention module.
- One common reason is silent parts in the audio clips at the beginning and the end. Check the ```trim_db``` value in the config. You can find a better value for your dataset by using the ```CheckSpectrogram``` notebook. If this value is too small, too much of the audio will be trimmed. If it is too big, then too much silence will remain. Both will curtail the `stopnet` performance.
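For reference, here is a minimal, illustrative sketch of scripting inference with the ```TTS.utils.synthesizer.Synthesizer``` class mentioned above. The checkpoint and config paths are placeholders for your own training run, and the exact constructor arguments can vary between 🐸TTS versions, so check the class signature in your installed version before relying on it.

```python
# Minimal sketch: programmatic inference with a trained model.
# "run_dir/..." paths are placeholders for your own training output folder.
from TTS.utils.synthesizer import Synthesizer

synthesizer = Synthesizer(
    tts_checkpoint="run_dir/best_model.pth",  # your trained checkpoint (placeholder)
    tts_config_path="run_dir/config.json",    # the matching config (placeholder)
    use_cuda=False,
)
wav = synthesizer.tts("This is a test sentence.")
synthesizer.save_wav(wav, "output.wav")
```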
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/framework/provider_options.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <string> #include <unordered_map> #include <vector> namespace onnxruntime { // data types for execution provider options using ProviderOptions = std::unordered_map<std::string, std::string>; using ProviderOptionsVector = std::vector<ProviderOptions>; using ProviderOptionsMap = std::unordered_map<std::string, ProviderOptions>; } // namespace onnxruntime
0
coqui_public_repos/STT
coqui_public_repos/STT/native_client/alphabet.h
#ifndef ALPHABET_H #define ALPHABET_H #include <string> #include <unordered_map> #include <vector> #include "flashlight/lib/text/dictionary/Dictionary.h" /* * Loads a text file describing a mapping of labels to strings, one string per * line. This is used by the decoder, client and Python scripts to convert the * output of the decoder to a human-readable string and vice-versa. */ class Alphabet : public fl::lib::text::Dictionary { public: Alphabet() = default; Alphabet(const Alphabet&) = default; Alphabet& operator=(const Alphabet&) = default; virtual ~Alphabet() = default; virtual int init(const char *config_file); // Initialize directly from sequence of labels. void InitFromLabels(const std::vector<std::string>& labels); // Serialize alphabet into a binary buffer. std::string Serialize(); // Serialize alphabet into a text representation (ie. config file read by `init`) std::string SerializeText(); // Deserialize alphabet from a binary buffer. int Deserialize(const char* buffer, const int buffer_size); size_t GetSize() const; bool IsSpace(unsigned int index) const { return index == space_index_; } unsigned int GetSpaceLabel() const { return space_index_; } virtual std::vector<std::string> GetLabels() const; // Returns true if the single character/output class has a corresponding index // in the alphabet. virtual bool CanEncodeSingle(const std::string& label) const; // Returns true if the entire string can be encoded with this alphabet. virtual bool CanEncode(const std::string& label) const; // Decode a single index into its label. std::string DecodeSingle(unsigned int index) const; // Encode a single character/output class into its index. Character must be in // the alphabet, this method will assert that. Use `CanEncodeSingle` to test. unsigned int EncodeSingle(const std::string& label) const; // Decode a sequence of indices into a string. std::string Decode(const std::vector<unsigned int>& indices) const; // We provide a C-style overload for accepting NumPy arrays as input, since // the NumPy library does not have built-in typemaps for std::vector<T>. std::string Decode(const unsigned int* indices, int length) const; // Encode a sequence of character/output classes into a sequence of indices. // Characters are assumed to always take a single Unicode codepoint. // Characters must be in the alphabet, this method will assert that. Use // `CanEncode` and `CanEncodeSingle` to test. virtual std::vector<unsigned int> Encode(const std::string& labels) const; protected: unsigned int space_index_; }; class UTF8Alphabet : public Alphabet { public: UTF8Alphabet() { // 255 byte values, index n -> byte value n+1 // because NUL is never used, we don't use up an index in the maps for it for (int idx = 0; idx < 255; ++idx) { std::string val(1, idx+1); addEntry(val, idx); } space_index_ = ' ' - 1; } int init(const char*) override { return 0; } bool CanEncodeSingle(const std::string& label) const override; bool CanEncode(const std::string& label) const override; std::vector<unsigned int> Encode(const std::string& label) const override; }; #endif //ALPHABET_H
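// Illustrative usage sketch (comments only, not part of this header): a client
// holding decoder output indices could turn them back into text roughly like
//
//   Alphabet alphabet;
//   alphabet.init("alphabet.txt");                // config path is an assumption
//   std::string text = alphabet.Decode(indices);  // indices: std::vector<unsigned int>
//
// and go the other way with Encode()/EncodeSingle(), checking
// CanEncode()/CanEncodeSingle() first for labels that may be missing.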
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/tc-node-utils.sh
#!/bin/bash set -xe # Will inspect this task's dependencies for one that provides a matching npm package get_dep_npm_pkg_url() { local all_deps="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${TASK_ID} | python -c 'import json; import sys; print(" ".join(json.loads(sys.stdin.read())["dependencies"]));')" # We try "deepspeech-tflite" and "deepspeech-gpu" first and if we don't find it we try "deepspeech" for pkg_basename in "deepspeech-tflite" "deepspeech-gpu" "deepspeech"; do local deepspeech_pkg="${pkg_basename}-${DS_VERSION}.tgz" for dep in ${all_deps}; do local has_artifact=$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts | python -c 'import json; import sys; has_artifact = True in [ e["name"].find("'${deepspeech_pkg}'") > 0 for e in json.loads(sys.stdin.read())["artifacts"] ]; print(has_artifact)') if [ "${has_artifact}" = "True" ]; then echo "https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/${deepspeech_pkg}" exit 0 fi; done; done; echo "" # This should not be reached, otherwise it means we could not find a matching nodejs package exit 1 }
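# Usage sketch (illustrative, not part of the original script): callers in the
# nodejs test scripts are expected to consume the URL echoed by the function,
# along the lines of
#   npm_pkg_url=$(get_dep_npm_pkg_url)   # hypothetical caller
#   npm install ${npm_pkg_url}           # exact install invocation is an assumption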
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/queue.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions and classes for various FST state queues with a unified interface. #ifndef FST_QUEUE_H_ #define FST_QUEUE_H_ #include <deque> #include <memory> #include <type_traits> #include <utility> #include <vector> #include <fst/log.h> #include <fst/arcfilter.h> #include <fst/connect.h> #include <fst/heap.h> #include <fst/topsort.h> namespace fst { // The Queue interface is: // // template <class S> // class Queue { // public: // using StateId = S; // // // Constructor: may need args (e.g., FST, comparator) for some queues. // Queue(...) override; // // // Returns the head of the queue. // StateId Head() const override; // // // Inserts a state. // void Enqueue(StateId s) override; // // // Removes the head of the queue. // void Dequeue() override; // // // Updates ordering of state s when weight changes, if necessary. // void Update(StateId s) override; // // // Is the queue empty? // bool Empty() const override; // // // Removes all states from the queue. // void Clear() override; // }; // State queue types. enum QueueType { TRIVIAL_QUEUE = 0, // Single state queue. FIFO_QUEUE = 1, // First-in, first-out queue. LIFO_QUEUE = 2, // Last-in, first-out queue. SHORTEST_FIRST_QUEUE = 3, // Shortest-first queue. TOP_ORDER_QUEUE = 4, // Topologically-ordered queue. STATE_ORDER_QUEUE = 5, // State ID-ordered queue. SCC_QUEUE = 6, // Component graph top-ordered meta-queue. AUTO_QUEUE = 7, // Auto-selected queue. OTHER_QUEUE = 8 }; // QueueBase, templated on the StateId, is a virtual base class shared by all // queues considered by AutoQueue. template <class S> class QueueBase { public: using StateId = S; virtual ~QueueBase() {} // Concrete implementation. explicit QueueBase(QueueType type) : queue_type_(type), error_(false) {} void SetError(bool error) { error_ = error; } bool Error() const { return error_; } QueueType Type() const { return queue_type_; } // Virtual interface. virtual StateId Head() const = 0; virtual void Enqueue(StateId) = 0; virtual void Dequeue() = 0; virtual void Update(StateId) = 0; virtual bool Empty() const = 0; virtual void Clear() = 0; private: QueueType queue_type_; bool error_; }; // Trivial queue discipline; one may enqueue at most one state at a time. It // can be used for strongly connected components with only one state and no // self-loops. template <class S> class TrivialQueue : public QueueBase<S> { public: using StateId = S; TrivialQueue() : QueueBase<StateId>(TRIVIAL_QUEUE), front_(kNoStateId) {} virtual ~TrivialQueue() = default; StateId Head() const final { return front_; } void Enqueue(StateId s) final { front_ = s; } void Dequeue() final { front_ = kNoStateId; } void Update(StateId) final {} bool Empty() const final { return front_ == kNoStateId; } void Clear() final { front_ = kNoStateId; } private: StateId front_; }; // First-in, first-out queue discipline. // // This is not a final class. template <class S> class FifoQueue : public QueueBase<S> { public: using StateId = S; FifoQueue() : QueueBase<StateId>(FIFO_QUEUE) {} virtual ~FifoQueue() = default; StateId Head() const override { return queue_.back(); } void Enqueue(StateId s) override { queue_.push_front(s); } void Dequeue() override { queue_.pop_back(); } void Update(StateId) override {} bool Empty() const override { return queue_.empty(); } void Clear() override { queue_.clear(); } private: std::deque<StateId> queue_; }; // Last-in, first-out queue discipline. 
template <class S> class LifoQueue : public QueueBase<S> { public: using StateId = S; LifoQueue() : QueueBase<StateId>(LIFO_QUEUE) {} virtual ~LifoQueue() = default; StateId Head() const final { return queue_.front(); } void Enqueue(StateId s) final { queue_.push_front(s); } void Dequeue() final { queue_.pop_front(); } void Update(StateId) final {} bool Empty() const final { return queue_.empty(); } void Clear() final { queue_.clear(); } private: std::deque<StateId> queue_; }; // Shortest-first queue discipline, templated on the StateId and as well as a // comparison functor used to compare two StateIds. If a (single) state's order // changes, it can be reordered in the queue with a call to Update(). If update // is false, call to Update() does not reorder the queue. // // This is not a final class. template <typename S, typename Compare, bool update = true> class ShortestFirstQueue : public QueueBase<S> { public: using StateId = S; explicit ShortestFirstQueue(Compare comp) : QueueBase<StateId>(SHORTEST_FIRST_QUEUE), heap_(comp) {} virtual ~ShortestFirstQueue() = default; StateId Head() const override { return heap_.Top(); } void Enqueue(StateId s) override { if (update) { for (StateId i = key_.size(); i <= s; ++i) key_.push_back(kNoStateId); key_[s] = heap_.Insert(s); } else { heap_.Insert(s); } } void Dequeue() override { if (update) { key_[heap_.Pop()] = kNoStateId; } else { heap_.Pop(); } } void Update(StateId s) override { if (!update) return; if (s >= key_.size() || key_[s] == kNoStateId) { Enqueue(s); } else { heap_.Update(key_[s], s); } } bool Empty() const override { return heap_.Empty(); } void Clear() override { heap_.Clear(); if (update) key_.clear(); } const Compare &GetCompare() const { return heap_.GetCompare(); } private: Heap<StateId, Compare> heap_; std::vector<std::ptrdiff_t> key_; }; namespace internal { // Given a vector that maps from states to weights, and a comparison functor // for weights, this class defines a comparison function object between states. template <typename StateId, typename Less> class StateWeightCompare { public: using Weight = typename Less::Weight; StateWeightCompare(const std::vector<Weight> &weights, const Less &less) : weights_(weights), less_(less) {} bool operator()(const StateId s1, const StateId s2) const { return less_(weights_[s1], weights_[s2]); } private: // Borrowed references. const std::vector<Weight> &weights_; const Less &less_; }; } // namespace internal // Shortest-first queue discipline, templated on the StateId and Weight, is // specialized to use the weight's natural order for the comparison function. template <typename S, typename Weight> class NaturalShortestFirstQueue final : public ShortestFirstQueue< S, internal::StateWeightCompare<S, NaturalLess<Weight>>> { public: using StateId = S; using Compare = internal::StateWeightCompare<StateId, NaturalLess<Weight>>; explicit NaturalShortestFirstQueue(const std::vector<Weight> &distance) : ShortestFirstQueue<StateId, Compare>(Compare(distance, less_)) {} virtual ~NaturalShortestFirstQueue() = default; private: // This is non-static because the constructor for non-idempotent weights will // result in an error. const NaturalLess<Weight> less_{}; }; // Topological-order queue discipline, templated on the StateId. States are // ordered in the queue topologically. The FST must be acyclic. template <class S> class TopOrderQueue : public QueueBase<S> { public: using StateId = S; // This constructor computes the topological order. 
It accepts an arc filter // to limit the transitions considered in that computation (e.g., only the // epsilon graph). template <class Arc, class ArcFilter> TopOrderQueue(const Fst<Arc> &fst, ArcFilter filter) : QueueBase<StateId>(TOP_ORDER_QUEUE), front_(0), back_(kNoStateId), order_(0), state_(0) { bool acyclic; TopOrderVisitor<Arc> top_order_visitor(&order_, &acyclic); DfsVisit(fst, &top_order_visitor, filter); if (!acyclic) { FSTERROR() << "TopOrderQueue: FST is not acyclic"; QueueBase<S>::SetError(true); } state_.resize(order_.size(), kNoStateId); } // This constructor is passed the pre-computed topological order. explicit TopOrderQueue(const std::vector<StateId> &order) : QueueBase<StateId>(TOP_ORDER_QUEUE), front_(0), back_(kNoStateId), order_(order), state_(order.size(), kNoStateId) {} virtual ~TopOrderQueue() = default; StateId Head() const final { return state_[front_]; } void Enqueue(StateId s) final { if (front_ > back_) { front_ = back_ = order_[s]; } else if (order_[s] > back_) { back_ = order_[s]; } else if (order_[s] < front_) { front_ = order_[s]; } state_[order_[s]] = s; } void Dequeue() final { state_[front_] = kNoStateId; while ((front_ <= back_) && (state_[front_] == kNoStateId)) ++front_; } void Update(StateId) final {} bool Empty() const final { return front_ > back_; } void Clear() final { for (StateId s = front_; s <= back_; ++s) state_[s] = kNoStateId; back_ = kNoStateId; front_ = 0; } private: StateId front_; StateId back_; std::vector<StateId> order_; std::vector<StateId> state_; }; // State order queue discipline, templated on the StateId. States are ordered in // the queue by state ID. template <class S> class StateOrderQueue : public QueueBase<S> { public: using StateId = S; StateOrderQueue() : QueueBase<StateId>(STATE_ORDER_QUEUE), front_(0), back_(kNoStateId) {} virtual ~StateOrderQueue() = default; StateId Head() const final { return front_; } void Enqueue(StateId s) final { if (front_ > back_) { front_ = back_ = s; } else if (s > back_) { back_ = s; } else if (s < front_) { front_ = s; } while (enqueued_.size() <= s) enqueued_.push_back(false); enqueued_[s] = true; } void Dequeue() final { enqueued_[front_] = false; while ((front_ <= back_) && (enqueued_[front_] == false)) ++front_; } void Update(StateId) final {} bool Empty() const final { return front_ > back_; } void Clear() final { for (StateId i = front_; i <= back_; ++i) enqueued_[i] = false; front_ = 0; back_ = kNoStateId; } private: StateId front_; StateId back_; std::vector<bool> enqueued_; }; // SCC topological-order meta-queue discipline, templated on the StateId and a // queue used inside each SCC. It visits the SCCs of an FST in topological // order. Its constructor is passed the queues to to use within an SCC. template <class S, class Queue> class SccQueue : public QueueBase<S> { public: using StateId = S; // Constructor takes a vector specifying the SCC number per state and a // vector giving the queue to use per SCC number. 
SccQueue(const std::vector<StateId> &scc, std::vector<std::unique_ptr<Queue>> *queue) : QueueBase<StateId>(SCC_QUEUE), queue_(queue), scc_(scc), front_(0), back_(kNoStateId) {} virtual ~SccQueue() = default; StateId Head() const final { while ((front_ <= back_) && (((*queue_)[front_] && (*queue_)[front_]->Empty()) || (((*queue_)[front_] == nullptr) && ((front_ >= trivial_queue_.size()) || (trivial_queue_[front_] == kNoStateId))))) { ++front_; } if ((*queue_)[front_]) { return (*queue_)[front_]->Head(); } else { return trivial_queue_[front_]; } } void Enqueue(StateId s) final { if (front_ > back_) { front_ = back_ = scc_[s]; } else if (scc_[s] > back_) { back_ = scc_[s]; } else if (scc_[s] < front_) { front_ = scc_[s]; } if ((*queue_)[scc_[s]]) { (*queue_)[scc_[s]]->Enqueue(s); } else { while (trivial_queue_.size() <= scc_[s]) { trivial_queue_.push_back(kNoStateId); } trivial_queue_[scc_[s]] = s; } } void Dequeue() final { if ((*queue_)[front_]) { (*queue_)[front_]->Dequeue(); } else if (front_ < trivial_queue_.size()) { trivial_queue_[front_] = kNoStateId; } } void Update(StateId s) final { if ((*queue_)[scc_[s]]) (*queue_)[scc_[s]]->Update(s); } bool Empty() const final { // Queues SCC number back_ is not empty unless back_ == front_. if (front_ < back_) { return false; } else if (front_ > back_) { return true; } else if ((*queue_)[front_]) { return (*queue_)[front_]->Empty(); } else { return (front_ >= trivial_queue_.size()) || (trivial_queue_[front_] == kNoStateId); } } void Clear() final { for (StateId i = front_; i <= back_; ++i) { if ((*queue_)[i]) { (*queue_)[i]->Clear(); } else if (i < trivial_queue_.size()) { trivial_queue_[i] = kNoStateId; } } front_ = 0; back_ = kNoStateId; } private: std::vector<std::unique_ptr<Queue>> *queue_; const std::vector<StateId> &scc_; mutable StateId front_; StateId back_; std::vector<StateId> trivial_queue_; }; // Automatic queue discipline. It selects a queue discipline for a given FST // based on its properties. template <class S> class AutoQueue : public QueueBase<S> { public: using StateId = S; // This constructor takes a state distance vector that, if non-null and if // the Weight type has the path property, will entertain the shortest-first // queue using the natural order w.r.t to the distance. template <class Arc, class ArcFilter> AutoQueue(const Fst<Arc> &fst, const std::vector<typename Arc::Weight> *distance, ArcFilter filter) : QueueBase<StateId>(AUTO_QUEUE) { using Weight = typename Arc::Weight; using Less = NaturalLess<Weight>; using Compare = internal::StateWeightCompare<StateId, Less>; // First checks if the FST is known to have these properties. const auto props = fst.Properties(kAcyclic | kCyclic | kTopSorted | kUnweighted, false); if ((props & kTopSorted) || fst.Start() == kNoStateId) { queue_.reset(new StateOrderQueue<StateId>()); VLOG(2) << "AutoQueue: using state-order discipline"; } else if (props & kAcyclic) { queue_.reset(new TopOrderQueue<StateId>(fst, filter)); VLOG(2) << "AutoQueue: using top-order discipline"; } else if ((props & kUnweighted) && (Weight::Properties() & kIdempotent)) { queue_.reset(new LifoQueue<StateId>()); VLOG(2) << "AutoQueue: using LIFO discipline"; } else { uint64_t properties; // Decomposes into strongly-connected components. 
SccVisitor<Arc> scc_visitor(&scc_, nullptr, nullptr, &properties); DfsVisit(fst, &scc_visitor, filter); auto nscc = *std::max_element(scc_.begin(), scc_.end()) + 1; std::vector<QueueType> queue_types(nscc); std::unique_ptr<Less> less; std::unique_ptr<Compare> comp; if (distance && (Weight::Properties() & kPath) == kPath) { less.reset(new Less); comp.reset(new Compare(*distance, *less)); } // Finds the queue type to use per SCC. bool unweighted; bool all_trivial; SccQueueType(fst, scc_, &queue_types, filter, less.get(), &all_trivial, &unweighted); // If unweighted and semiring is idempotent, uses LIFO queue. if (unweighted) { queue_.reset(new LifoQueue<StateId>()); VLOG(2) << "AutoQueue: using LIFO discipline"; return; } // If all the SCC are trivial, the FST is acyclic and the scc number gives // the topological order. if (all_trivial) { queue_.reset(new TopOrderQueue<StateId>(scc_)); VLOG(2) << "AutoQueue: using top-order discipline"; return; } VLOG(2) << "AutoQueue: using SCC meta-discipline"; queues_.resize(nscc); for (StateId i = 0; i < nscc; ++i) { switch (queue_types[i]) { case TRIVIAL_QUEUE: queues_[i].reset(); VLOG(3) << "AutoQueue: SCC #" << i << ": using trivial discipline"; break; case SHORTEST_FIRST_QUEUE: queues_[i].reset( new ShortestFirstQueue<StateId, Compare, false>(*comp)); VLOG(3) << "AutoQueue: SCC #" << i << ": using shortest-first discipline"; break; case LIFO_QUEUE: queues_[i].reset(new LifoQueue<StateId>()); VLOG(3) << "AutoQueue: SCC #" << i << ": using LIFO discipline"; break; case FIFO_QUEUE: default: queues_[i].reset(new FifoQueue<StateId>()); VLOG(3) << "AutoQueue: SCC #" << i << ": using FIFO discipine"; break; } } queue_.reset(new SccQueue<StateId, QueueBase<StateId>>(scc_, &queues_)); } } virtual ~AutoQueue() = default; StateId Head() const final { return queue_->Head(); } void Enqueue(StateId s) final { queue_->Enqueue(s); } void Dequeue() final { queue_->Dequeue(); } void Update(StateId s) final { queue_->Update(s); } bool Empty() const final { return queue_->Empty(); } void Clear() final { queue_->Clear(); } private: template <class Arc, class ArcFilter, class Less> static void SccQueueType(const Fst<Arc> &fst, const std::vector<StateId> &scc, std::vector<QueueType> *queue_types, ArcFilter filter, Less *less, bool *all_trivial, bool *unweighted); std::unique_ptr<QueueBase<StateId>> queue_; std::vector<std::unique_ptr<QueueBase<StateId>>> queues_; std::vector<StateId> scc_; }; // Examines the states in an FST's strongly connected components and determines // which type of queue to use per SCC. Stores result as a vector of QueueTypes // which is assumed to have length equal to the number of SCCs. An arc filter // is used to limit the transitions considered (e.g., only the epsilon graph). // The argument all_trivial is set to true if every queue is the trivial queue. // The argument unweighted is set to true if the semiring is idempotent and all // the arc weights are equal to Zero() or One(). 
template <class StateId> template <class Arc, class ArcFilter, class Less> void AutoQueue<StateId>::SccQueueType(const Fst<Arc> &fst, const std::vector<StateId> &scc, std::vector<QueueType> *queue_type, ArcFilter filter, Less *less, bool *all_trivial, bool *unweighted) { using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; *all_trivial = true; *unweighted = true; for (StateId i = 0; i < queue_type->size(); ++i) { (*queue_type)[i] = TRIVIAL_QUEUE; } for (StateIterator<Fst<Arc>> sit(fst); !sit.Done(); sit.Next()) { const auto state = sit.Value(); for (ArcIterator<Fst<Arc>> ait(fst, state); !ait.Done(); ait.Next()) { const auto &arc = ait.Value(); if (!filter(arc)) continue; if (scc[state] == scc[arc.nextstate]) { auto &type = (*queue_type)[scc[state]]; if (!less || ((*less)(arc.weight, Weight::One()))) { type = FIFO_QUEUE; } else if ((type == TRIVIAL_QUEUE) || (type == LIFO_QUEUE)) { if (!(Weight::Properties() & kIdempotent) || (arc.weight != Weight::Zero() && arc.weight != Weight::One())) { type = SHORTEST_FIRST_QUEUE; } else { type = LIFO_QUEUE; } } if (type != TRIVIAL_QUEUE) *all_trivial = false; } if (!(Weight::Properties() & kIdempotent) || (arc.weight != Weight::Zero() && arc.weight != Weight::One())) { *unweighted = false; } } } } // An A* estimate is a function object that maps from a state ID to an // estimate of the shortest distance to the final states. // A trivial A* estimate, yielding a queue which behaves the same in Dijkstra's // algorithm. template <typename StateId, typename Weight> struct TrivialAStarEstimate { const Weight &operator()(StateId) const { return Weight::One(); } }; // A non-trivial A* estimate using a vector of the estimated future costs. template <typename StateId, typename Weight> class NaturalAStarEstimate { public: NaturalAStarEstimate(const std::vector<Weight> &beta) : beta_(beta) {} const Weight &operator()(StateId s) const { return beta_[s]; } private: const std::vector<Weight> &beta_; }; // Given a vector that maps from states to weights representing the shortest // distance from the initial state, a comparison function object between // weights, and an estimate of the shortest distance to the final states, this // class defines a comparison function object between states. template <typename S, typename Less, typename Estimate> class AStarWeightCompare { public: using StateId = S; using Weight = typename Less::Weight; AStarWeightCompare(const std::vector<Weight> &weights, const Less &less, const Estimate &estimate) : weights_(weights), less_(less), estimate_(estimate) {} bool operator()(StateId s1, StateId s2) const { const auto w1 = Times(weights_[s1], estimate_(s1)); const auto w2 = Times(weights_[s2], estimate_(s2)); return less_(w1, w2); } const Estimate &GetEstimate() const { return estimate_; } private: const std::vector<Weight> &weights_; const Less &less_; const Estimate &estimate_; }; // A* queue discipline templated on StateId, Weight, and Estimate. template <typename S, typename Weight, typename Estimate> class NaturalAStarQueue : public ShortestFirstQueue< S, AStarWeightCompare<S, NaturalLess<Weight>, Estimate>> { public: using StateId = S; using Compare = AStarWeightCompare<StateId, NaturalLess<Weight>, Estimate>; NaturalAStarQueue(const std::vector<Weight> &distance, const Estimate &estimate) : ShortestFirstQueue<StateId, Compare>( Compare(distance, less_, estimate)) {} ~NaturalAStarQueue() = default; private: // This is non-static because the constructor for non-idempotent weights will // result in an error. 
const NaturalLess<Weight> less_{}; }; // A state equivalence class is a function object that maps from a state ID to // an equivalence class (state) ID. The trivial equivalence class maps a state // ID to itself. template <typename StateId> struct TrivialStateEquivClass { StateId operator()(StateId s) const { return s; } }; // Distance-based pruning queue discipline: Enqueues a state only when its // shortest distance (so far), as specified by distance, is less than (as // specified by comp) the shortest distance Times() the threshold to any state // in the same equivalence class, as specified by the functor class_func. The // underlying queue discipline is specified by queue. The ownership of queue is // given to this class. // // This is not a final class. template <typename Queue, typename Less, typename ClassFnc> class PruneQueue : public QueueBase<typename Queue::StateId> { public: using StateId = typename Queue::StateId; using Weight = typename Less::Weight; PruneQueue(const std::vector<Weight> &distance, Queue *queue, const Less &less, const ClassFnc &class_fnc, Weight threshold) : QueueBase<StateId>(OTHER_QUEUE), distance_(distance), queue_(queue), less_(less), class_fnc_(class_fnc), threshold_(std::move(threshold)) {} virtual ~PruneQueue() = default; StateId Head() const override { return queue_->Head(); } void Enqueue(StateId s) override { const auto c = class_fnc_(s); if (c >= class_distance_.size()) { class_distance_.resize(c + 1, Weight::Zero()); } if (less_(distance_[s], class_distance_[c])) { class_distance_[c] = distance_[s]; } // Enqueues only if below threshold limit. const auto limit = Times(class_distance_[c], threshold_); if (less_(distance_[s], limit)) queue_->Enqueue(s); } void Dequeue() override { queue_->Dequeue(); } void Update(StateId s) override { const auto c = class_fnc_(s); if (less_(distance_[s], class_distance_[c])) { class_distance_[c] = distance_[s]; } queue_->Update(s); } bool Empty() const override { return queue_->Empty(); } void Clear() override { queue_->Clear(); } private: const std::vector<Weight> &distance_; // Shortest distance to state. std::unique_ptr<Queue> queue_; const Less &less_; // Borrowed reference. const ClassFnc &class_fnc_; // Equivalence class functor. Weight threshold_; // Pruning weight threshold. std::vector<Weight> class_distance_; // Shortest distance to class. }; // Pruning queue discipline (see above) using the weight's natural order for the // comparison function. The ownership of the queue argument is given to this // class. template <typename Queue, typename Weight, typename ClassFnc> class NaturalPruneQueue final : public PruneQueue<Queue, NaturalLess<Weight>, ClassFnc> { public: using StateId = typename Queue::StateId; NaturalPruneQueue(const std::vector<Weight> &distance, Queue *queue, const ClassFnc &class_fnc, Weight threshold) : PruneQueue<Queue, NaturalLess<Weight>, ClassFnc>( distance, queue, NaturalLess<Weight>(), class_fnc, threshold) {} virtual ~NaturalPruneQueue() = default; }; // Filter-based pruning queue discipline: enqueues a state only if allowed by // the filter, specified by the state filter functor argument. The underlying // queue discipline is specified by the queue argument. The ownership of the // queue is given to this class. 
template <typename Queue, typename Filter> class FilterQueue : public QueueBase<typename Queue::StateId> { public: using StateId = typename Queue::StateId; FilterQueue(Queue *queue, const Filter &filter) : QueueBase<StateId>(OTHER_QUEUE), queue_(queue), filter_(filter) {} virtual ~FilterQueue() = default; StateId Head() const final { return queue_->Head(); } // Enqueues only if allowed by state filter. void Enqueue(StateId s) final { if (filter_(s)) queue_->Enqueue(s); } void Dequeue() final { queue_->Dequeue(); } void Update(StateId s) final {} bool Empty() const final { return queue_->Empty(); } void Clear() final { queue_->Clear(); } private: std::unique_ptr<Queue> queue_; const Filter &filter_; }; } // namespace fst #endif // FST_QUEUE_H_
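// Editor's note: an illustrative usage sketch, not part of the original header. It shows the
// typical way the disciplines above are plugged into ShortestDistance(); the options type comes
// from <fst/shortest-distance.h>, and "in.fst" is a placeholder file name.
#include <memory>
#include <vector>
#include <fst/fstlib.h>

void AutoQueueExample() {
  std::unique_ptr<fst::StdVectorFst> ifst(fst::StdVectorFst::Read("in.fst"));
  if (!ifst) return;
  std::vector<fst::TropicalWeight> distance;
  fst::AnyArcFilter<fst::StdArc> filter;
  // AutoQueue inspects the FST's properties and picks a state-order, topological,
  // LIFO, or SCC-based meta-discipline as described above.
  fst::AutoQueue<fst::StdArc::StateId> queue(*ifst, &distance, filter);
  const fst::ShortestDistanceOptions<fst::StdArc, fst::AutoQueue<fst::StdArc::StateId>,
                                     fst::AnyArcFilter<fst::StdArc>> opts(&queue, filter);
  fst::ShortestDistance(*ifst, &distance, opts);
}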
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/script/push.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/script/push.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void Push(MutableFstClass *fst, ReweightType rew_type, float delta, bool remove_total_weight) { PushArgs1 args(fst, rew_type, delta, remove_total_weight); Apply<Operation<PushArgs1>>("Push", fst->ArcType(), &args); } void Push(const FstClass &ifst, MutableFstClass *ofst, uint32 flags, ReweightType rew_type, float delta) { if (!internal::ArcTypesMatch(ifst, *ofst, "Push")) { ofst->SetProperties(kError, kError); return; } PushArgs2 args(ifst, ofst, flags, rew_type, delta); Apply<Operation<PushArgs2>>("Push", ifst.ArcType(), &args); } REGISTER_FST_OPERATION(Push, StdArc, PushArgs1); REGISTER_FST_OPERATION(Push, LogArc, PushArgs1); REGISTER_FST_OPERATION(Push, Log64Arc, PushArgs1); REGISTER_FST_OPERATION(Push, StdArc, PushArgs2); REGISTER_FST_OPERATION(Push, LogArc, PushArgs2); REGISTER_FST_OPERATION(Push, Log64Arc, PushArgs2); } // namespace script } // namespace fst
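// Editor's note: a brief usage sketch, not part of the original file, showing how the
// script-level Push() wrapper above is typically invoked; "in.fst" is a placeholder.
#include <memory>
#include <fst/script/fstscript.h>

void PushExample() {
  std::unique_ptr<fst::script::MutableFstClass> fst(
      fst::script::MutableFstClass::Read("in.fst"));
  if (!fst) return;
  // In-place overload: reweights toward the initial state without removing the total weight.
  fst::script::Push(fst.get(), fst::REWEIGHT_TO_INITIAL, fst::kDelta,
                    /*remove_total_weight=*/false);
  fst->Write("pushed.fst");
}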
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/compact/compact16_unweighted_acceptor-fst.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/fst.h> #include <fst/compact-fst.h> namespace fst { static FstRegisterer< CompactUnweightedAcceptorFst<StdArc, uint16>> CompactUnweightedAcceptorFst_StdArc_uint16_registerer; static FstRegisterer< CompactUnweightedAcceptorFst<LogArc, uint16>> CompactUnweightedAcceptorFst_LogArc_uint16_registerer; } // namespace fst
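// Editor's note: an illustrative sketch, not part of the original file. The registration above is
// what lets the generic readers recover this compact type from disk; the conversion below assumes
// the input FST is already an unweighted acceptor, and the file name is a placeholder.
#include <memory>
#include <fst/fstlib.h>
#include <fst/compact-fst.h>

void CompactExample(const fst::StdVectorFst &unweighted_acceptor) {
  // Re-encode the expanded FST with the uint16 compact representation.
  const fst::CompactUnweightedAcceptorFst<fst::StdArc, uint16> compact(unweighted_acceptor);
  compact.Write("compact.fst");
  // Because the type is registered, the generic reader restores the compact form.
  std::unique_ptr<fst::Fst<fst::StdArc>> restored(
      fst::Fst<fst::StdArc>::Read("compact.fst"));
}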
0
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/xtts/gpt.py
# ported from: https://github.com/neonbjb/tortoise-tts import functools import math import random import torch import torch.nn as nn import torch.nn.functional as F from transformers import GPT2Config from TTS.tts.layers.xtts.gpt_inference import GPT2InferenceModel from TTS.tts.layers.xtts.latent_encoder import ConditioningEncoder from TTS.tts.layers.xtts.perceiver_encoder import PerceiverResampler def null_position_embeddings(range, dim): return torch.zeros((range.shape[0], range.shape[1], dim), device=range.device) class LearnedPositionEmbeddings(nn.Module): def __init__(self, seq_len, model_dim, init=0.02, relative=False): super().__init__() # nn.Embedding self.emb = torch.nn.Embedding(seq_len, model_dim) # Initializing this way is standard for GPT-2 self.emb.weight.data.normal_(mean=0.0, std=init) self.relative = relative self.seq_len = seq_len def forward(self, x): sl = x.shape[1] if self.relative: start = random.randint(sl, self.seq_len) - sl return self.emb(torch.arange(start, start + sl, device=x.device)) else: return self.emb(torch.arange(0, sl, device=x.device)) def get_fixed_embedding(self, ind, dev): return self.emb(torch.tensor([ind], device=dev)).unsqueeze(0) def build_hf_gpt_transformer( layers, model_dim, heads, max_mel_seq_len, max_text_seq_len, max_prompt_len, checkpointing, ): """ GPT-2 implemented by the HuggingFace library. """ from transformers import GPT2Config, GPT2Model gpt_config = GPT2Config( vocab_size=256, # Unused. n_positions=max_mel_seq_len + max_text_seq_len + max_prompt_len, n_ctx=max_mel_seq_len + max_text_seq_len + max_prompt_len, n_embd=model_dim, n_layer=layers, n_head=heads, gradient_checkpointing=checkpointing, use_cache=not checkpointing, ) gpt = GPT2Model(gpt_config) # Override the built in positional embeddings del gpt.wpe gpt.wpe = functools.partial(null_position_embeddings, dim=model_dim) # Built-in token embeddings are unused. 
del gpt.wte mel_pos_emb = ( LearnedPositionEmbeddings(max_mel_seq_len, model_dim) if max_mel_seq_len != -1 else functools.partial(null_position_embeddings, dim=model_dim) ) text_pos_emb = ( LearnedPositionEmbeddings(max_text_seq_len, model_dim) if max_mel_seq_len != -1 else functools.partial(null_position_embeddings, dim=model_dim) ) # gpt = torch.compile(gpt, mode="reduce-overhead", fullgraph=True) return gpt, mel_pos_emb, text_pos_emb, None, None class GPT(nn.Module): def __init__( self, start_text_token=261, stop_text_token=0, layers=8, model_dim=512, heads=8, max_text_tokens=120, max_mel_tokens=250, max_prompt_tokens=70, max_conditioning_inputs=1, code_stride_len=1024, number_text_tokens=256, num_audio_tokens=8194, start_audio_token=8192, stop_audio_token=8193, train_solo_embeddings=False, checkpointing=False, average_conditioning_embeddings=False, label_smoothing=0.0, use_perceiver_resampler=False, perceiver_cond_length_compression=256, ): """ Args: """ super().__init__() self.label_smoothing = label_smoothing self.number_text_tokens = number_text_tokens self.start_text_token = start_text_token self.stop_text_token = stop_text_token self.num_audio_tokens = num_audio_tokens self.start_audio_token = start_audio_token self.stop_audio_token = stop_audio_token self.start_prompt_token = start_audio_token self.stop_prompt_token = stop_audio_token self.layers = layers self.heads = heads self.model_dim = model_dim self.max_conditioning_inputs = max_conditioning_inputs self.max_gen_mel_tokens = max_mel_tokens - self.max_conditioning_inputs - 2 self.max_mel_tokens = -1 if max_mel_tokens == -1 else max_mel_tokens + 2 + self.max_conditioning_inputs self.max_text_tokens = -1 if max_text_tokens == -1 else max_text_tokens + 2 self.max_prompt_tokens = max_prompt_tokens self.code_stride_len = code_stride_len self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads) self.conditioning_dropout = nn.Dropout1d(0.1) self.average_conditioning_embeddings = average_conditioning_embeddings self.use_perceiver_resampler = use_perceiver_resampler self.perceiver_cond_length_compression = perceiver_cond_length_compression self.text_embedding = nn.Embedding(self.number_text_tokens, model_dim) self.mel_embedding = nn.Embedding(self.num_audio_tokens, model_dim) ( self.gpt, self.mel_pos_embedding, self.text_pos_embedding, self.mel_layer_pos_embedding, self.text_layer_pos_embedding, ) = build_hf_gpt_transformer( layers, model_dim, heads, self.max_mel_tokens, self.max_text_tokens, self.max_prompt_tokens, checkpointing, ) if train_solo_embeddings: self.mel_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * 0.02, requires_grad=True) self.text_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * 0.02, requires_grad=True) else: self.mel_solo_embedding = 0 self.text_solo_embedding = 0 self.final_norm = nn.LayerNorm(model_dim) self.text_head = nn.Linear(model_dim, self.number_text_tokens) self.mel_head = nn.Linear(model_dim, self.num_audio_tokens) if self.use_perceiver_resampler: # XTTS v2 self.conditioning_perceiver = PerceiverResampler( dim=model_dim, depth=2, dim_context=model_dim, num_latents=32, dim_head=64, heads=8, ff_mult=4, use_flash_attn=False, ) else: # XTTS v1 self.prompt_embedding = nn.Embedding(self.num_audio_tokens, model_dim) self.prompt_pos_embedding = LearnedPositionEmbeddings(24 * 9, model_dim) def get_grad_norm_parameter_groups(self): return { "conditioning_encoder": list(self.conditioning_encoder.parameters()), "conditioning_perceiver": 
list(self.conditioning_perceiver.parameters()) if self.use_perceiver_resampler else None, "gpt": list(self.gpt.parameters()), "heads": list(self.text_head.parameters()) + list(self.mel_head.parameters()), } def init_gpt_for_inference(self, kv_cache=True, use_deepspeed=False): seq_length = self.max_prompt_tokens + self.max_mel_tokens + self.max_text_tokens + 1 gpt_config = GPT2Config( vocab_size=self.max_mel_tokens, n_positions=seq_length, n_ctx=seq_length, n_embd=self.model_dim, n_layer=self.layers, n_head=self.heads, gradient_checkpointing=False, use_cache=True, ) self.gpt_inference = GPT2InferenceModel( gpt_config, self.gpt, self.mel_pos_embedding, self.mel_embedding, self.final_norm, self.mel_head, kv_cache=kv_cache, ) self.gpt.wte = self.mel_embedding if use_deepspeed: import deepspeed self.ds_engine = deepspeed.init_inference( model=self.gpt_inference.half(), # Transformers models mp_size=1, # Number of GPU dtype=torch.float32, # desired data type of output replace_method="auto", # Lets DS autmatically identify the layer to replace replace_with_kernel_inject=True, # replace the model with the kernel injector ) self.gpt_inference = self.ds_engine.module.eval() def set_inputs_and_targets(self, input, start_token, stop_token): inp = F.pad(input, (1, 0), value=start_token) tar = F.pad(input, (0, 1), value=stop_token) return inp, tar def set_mel_padding(self, mel_input_tokens, code_lengths): """ Given mel tokens that are derived from a padded audio clip and the actual lengths of each batch element in that audio clip, reformats the tokens with stop_audio_token in place of the zero padding. This is required preformatting to create a working TTS model. """ # Set padding areas within MEL (currently it is coded with the MEL code for <zero>). for b in range(len(code_lengths)): actual_end = code_lengths[b] if actual_end < mel_input_tokens.shape[-1]: mel_input_tokens[b, actual_end:] = self.stop_audio_token return mel_input_tokens def get_logits( self, first_inputs, first_head, second_inputs=None, second_head=None, prompt=None, get_attns=False, return_latent=False, attn_mask_cond=None, attn_mask_text=None, attn_mask_mel=None, ): if prompt is not None: offset = prompt.shape[1] if second_inputs is not None: emb = torch.cat([prompt, first_inputs, second_inputs], dim=1) else: emb = torch.cat([prompt, first_inputs], dim=1) # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): attn_mask = None if attn_mask_text is not None: attn_mask = torch.cat([attn_mask_text, attn_mask_mel], dim=1) if prompt is not None: attn_mask_cond = torch.ones(prompt.shape[0], offset, dtype=torch.bool, device=emb.device) attn_mask = torch.cat([attn_mask_cond, attn_mask], dim=1) gpt_out = self.gpt( inputs_embeds=emb, return_dict=True, output_attentions=get_attns, attention_mask=attn_mask, ) if get_attns: return gpt_out.attentions enc = gpt_out.last_hidden_state[:, offset:] enc = self.final_norm(enc) if return_latent: return enc[:, : first_inputs.shape[1]], enc[:, -second_inputs.shape[1] :] first_logits = enc[:, : first_inputs.shape[1]] first_logits = first_head(first_logits) first_logits = first_logits.permute(0, 2, 1) if second_inputs is not None: second_logits = enc[:, -second_inputs.shape[1] :] second_logits = second_head(second_logits) second_logits = second_logits.permute(0, 2, 1) return first_logits, second_logits else: return first_logits def get_conditioning(self, speech_conditioning_input): speech_conditioning_input = ( speech_conditioning_input.unsqueeze(1) if 
len(speech_conditioning_input.shape) == 3 else speech_conditioning_input ) conds = [] for j in range(speech_conditioning_input.shape[1]): conds.append(self.conditioning_encoder(speech_conditioning_input[:, j])) conds = torch.stack(conds, dim=1) conds = conds.mean(dim=1) return conds def get_prompts(self, prompt_codes): """ Create a prompt from the mel codes. This is used to condition the model on the mel codes. Pad the prompt with start and stop mel tokens. """ prompt = prompt_codes if self.training: lengths = [] # Compute the real prompt length based on the first encounter with the token 83 used for padding for i in range(prompt_codes.shape[0]): length = 0 for j in range(prompt_codes.shape[1]): if prompt_codes[i, j] == 83: break else: length += 1 lengths.append(length) # prompt_len = random.randint(1, 9) # in secs prompt_len = 3 prompt_len = prompt_len * 24 # in frames if prompt_codes.shape[-1] >= prompt_len: for i in range(prompt_codes.shape[0]): if lengths[i] < prompt_len: start = 0 else: start = random.randint(0, lengths[i] - prompt_len) prompt = prompt_codes[:, start : start + prompt_len] # add start and stop tokens prompt = F.pad(prompt, (1, 0), value=self.start_prompt_token) prompt = F.pad(prompt, (0, 1), value=self.stop_prompt_token) return prompt def get_style_emb(self, cond_input, return_latent=False): """ cond_input: (b, 80, s) or (b, 1, 80, s) conds: (b, 1024, s) """ conds = None if not return_latent: if cond_input.ndim == 4: cond_input = cond_input.squeeze(1) conds = self.conditioning_encoder(cond_input) # (b, d, s) if self.use_perceiver_resampler: conds = self.conditioning_perceiver(conds.permute(0, 2, 1)).transpose(1, 2) # (b, d, 32) else: # already computed conds = cond_input.unsqueeze(1) return conds def forward( self, text_inputs, text_lengths, audio_codes, wav_lengths, cond_mels=None, cond_idxs=None, cond_lens=None, cond_latents=None, return_attentions=False, return_latent=False, ): """ Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode (actuated by `text_first`). text_inputs: long tensor, (b,t) text_lengths: long tensor, (b,) mel_inputs: long tensor, (b,m) wav_lengths: long tensor, (b,) cond_mels: MEL float tensor, (b, 1, 80,s) cond_idxs: cond start and end indexs, (b, 2) If return_attentions is specified, only logits are returned. If return_latent is specified, loss & logits are not computed or returned. Only the predicted latents are returned. """ # ❗ FIXIT if self.max_conditioning_inputs == 0: assert cond_mels is None, " ❗ cond_mels is not None, but max_conditioning_inputs == 0" max_text_len = text_lengths.max() code_lengths = torch.ceil(wav_lengths / self.code_stride_len).long() + 3 if cond_lens is not None: if self.use_perceiver_resampler: cond_lens = cond_lens // self.perceiver_cond_length_compression else: cond_lens = cond_lens // self.code_stride_len if cond_idxs is not None: # recompute cond idxs for mel lengths for idx in range(cond_idxs.size(0)): if self.use_perceiver_resampler: cond_idxs[idx] = cond_idxs[idx] // self.perceiver_cond_length_compression else: cond_idxs[idx] = cond_idxs[idx] // self.code_stride_len # ensure that the cond_mel does not have padding # if cond_lens is not None and cond_idxs is None: # min_cond_len = torch.min(cond_lens) # cond_mels = cond_mels[:, :, :, :min_cond_len] # If len(codes) + 3 is larger than maxiumum allowed length, we truncate the codes. 
max_mel_len = code_lengths.max() if max_mel_len > audio_codes.shape[-1]: audio_codes = F.pad(audio_codes, (0, max_mel_len - audio_codes.shape[-1])) # 💖 Lovely assertions assert ( max_mel_len <= audio_codes.shape[-1] ), f" ❗ max_mel_len ({max_mel_len}) > audio_codes.shape[-1] ({audio_codes.shape[-1]})" assert ( max_text_len <= text_inputs.shape[-1] ), f" ❗ max_text_len ({max_text_len}) > text_inputs.shape[-1] ({text_inputs.shape[-1]})" # Append stop token to text inputs text_inputs = F.pad(text_inputs[:, :max_text_len], (0, 1), value=self.stop_text_token) # Append silence token to mel codes audio_codes = F.pad(audio_codes[:, :max_mel_len], (0, 1), value=self.stop_audio_token) # Pad mel codes with stop_audio_token audio_codes = self.set_mel_padding( audio_codes, code_lengths - 3 ) # -3 to get the real code lengths without consider start and stop tokens that was not added yet # Build input and target tensors # Prepend start token to inputs and append stop token to targets text_inputs, text_targets = self.set_inputs_and_targets( text_inputs, self.start_text_token, self.stop_text_token ) audio_codes, mel_targets = self.set_inputs_and_targets( audio_codes, self.start_audio_token, self.stop_audio_token ) # Set attn_mask attn_mask_cond = None attn_mask_text = None attn_mask_mel = None if not return_latent: attn_mask_cond = torch.ones( cond_mels.shape[0], cond_mels.shape[-1], dtype=torch.bool, device=text_inputs.device, ) attn_mask_text = torch.ones( text_inputs.shape[0], text_inputs.shape[1], dtype=torch.bool, device=text_inputs.device, ) attn_mask_mel = torch.ones( audio_codes.shape[0], audio_codes.shape[1], dtype=torch.bool, device=audio_codes.device, ) if cond_idxs is not None: # use masking approach for idx, r in enumerate(cond_idxs): l = r[1] - r[0] attn_mask_cond[idx, l:] = 0.0 elif cond_lens is not None: for idx, l in enumerate(cond_lens): attn_mask_cond[idx, l:] = 0.0 for idx, l in enumerate(text_lengths): attn_mask_text[idx, l + 1 :] = 0.0 for idx, l in enumerate(code_lengths): attn_mask_mel[idx, l + 1 :] = 0.0 # Compute text embeddings + positional embeddings text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs) # Compute mel embeddings + positional embeddings mel_emb = self.mel_embedding(audio_codes) + self.mel_pos_embedding(audio_codes) # Compute speech conditioning input if cond_latents is None: cond_latents = self.get_style_emb(cond_mels).transpose(1, 2) # Get logits sub = -5 # don't ask me why 😄 if self.training: sub = -1 text_logits, mel_logits = self.get_logits( text_emb, self.text_head, mel_emb, self.mel_head, prompt=cond_latents, get_attns=return_attentions, return_latent=return_latent, attn_mask_cond=attn_mask_cond, attn_mask_text=attn_mask_text, attn_mask_mel=attn_mask_mel, ) if return_latent: return mel_logits[:, :sub] # sub to prevent bla. if return_attentions: return mel_logits # Set paddings to -1 to ignore them in loss for idx, l in enumerate(text_lengths): text_targets[idx, l + 1 :] = -1 for idx, l in enumerate(code_lengths): mel_targets[idx, l + 1 :] = -1 # check if stoptoken is in every row of mel_targets assert (mel_targets == self.stop_audio_token).sum() >= mel_targets.shape[ 0 ], f" ❗ mel_targets does not contain stop token ({self.stop_audio_token}) in every row." 
# ignore the loss for the segment used for conditioning # coin flip for the segment to be ignored if cond_idxs is not None: cond_start = cond_idxs[idx, 0] cond_end = cond_idxs[idx, 1] mel_targets[idx, cond_start:cond_end] = -1 # Compute losses loss_text = F.cross_entropy( text_logits, text_targets.long(), ignore_index=-1, label_smoothing=self.label_smoothing ) loss_mel = F.cross_entropy( mel_logits, mel_targets.long(), ignore_index=-1, label_smoothing=self.label_smoothing ) return loss_text.mean(), loss_mel.mean(), mel_logits def inference(self, cond_latents, text_inputs, **hf_generate_kwargs): self.compute_embeddings(cond_latents, text_inputs) return self.generate(cond_latents, text_inputs, **hf_generate_kwargs) def compute_embeddings( self, cond_latents, text_inputs, ): text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token) text_inputs = F.pad(text_inputs, (1, 0), value=self.start_text_token) emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs) emb = torch.cat([cond_latents, emb], dim=1) self.gpt_inference.store_prefix_emb(emb) gpt_inputs = torch.full( ( emb.shape[0], emb.shape[1] + 1, # +1 for the start_audio_token ), fill_value=1, dtype=torch.long, device=text_inputs.device, ) gpt_inputs[:, -1] = self.start_audio_token return gpt_inputs def generate( self, cond_latents, text_inputs, **hf_generate_kwargs, ): gpt_inputs = self.compute_embeddings(cond_latents, text_inputs) gen = self.gpt_inference.generate( gpt_inputs, bos_token_id=self.start_audio_token, pad_token_id=self.stop_audio_token, eos_token_id=self.stop_audio_token, max_length=self.max_gen_mel_tokens + gpt_inputs.shape[-1], **hf_generate_kwargs, ) if "return_dict_in_generate" in hf_generate_kwargs: return gen.sequences[:, gpt_inputs.shape[1] :], gen return gen[:, gpt_inputs.shape[1] :] def get_generator(self, fake_inputs, **hf_generate_kwargs): return self.gpt_inference.generate_stream( fake_inputs, bos_token_id=self.start_audio_token, pad_token_id=self.stop_audio_token, eos_token_id=self.stop_audio_token, max_length=self.max_gen_mel_tokens + fake_inputs.shape[-1], do_stream=True, **hf_generate_kwargs, )
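# Editor's note: a small, hedged usage sketch, not part of the original module, illustrating the
# start/stop-token padding performed by GPT.set_inputs_and_targets(); the token IDs below are the
# class defaults shown above, and the code tensor is a toy placeholder.
import torch
import torch.nn.functional as F

codes = torch.tensor([[5, 6, 7]])
start_audio_token, stop_audio_token = 8192, 8193
# Inputs get the start token prepended; targets get the stop token appended.
inp = F.pad(codes, (1, 0), value=start_audio_token)   # tensor([[8192, 5, 6, 7]])
tar = F.pad(codes, (0, 1), value=stop_audio_token)    # tensor([[5, 6, 7, 8193]])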
0
coqui_public_repos/STT/native_client/kenlm
coqui_public_repos/STT/native_client/kenlm/util/sized_iterator_test.cc
#include "sized_iterator.hh" #define BOOST_TEST_MODULE SizedIteratorTest #include <boost/test/unit_test.hpp> namespace util { namespace { struct CompareChar { bool operator()(const void *first, const void *second) const { return *static_cast<const char*>(first) < *static_cast<const char*>(second); } }; BOOST_AUTO_TEST_CASE(sort) { char items[3] = {1, 2, 0}; SizedSort(items, items + 3, 1, CompareChar()); BOOST_CHECK_EQUAL(0, items[0]); BOOST_CHECK_EQUAL(1, items[1]); BOOST_CHECK_EQUAL(2, items[2]); } }} // namespace anonymous util
0
coqui_public_repos/inference-engine/third_party/kenlm
coqui_public_repos/inference-engine/third_party/kenlm/util/integer_to_string.cc
#include <iostream> /* Fast integer to string conversion. Source: https://github.com/miloyip/itoa-benchmark Local modifications: 1. Return end of buffer instead of null terminating 2. Collapse to single file 3. Namespace 4. Remove test hook 5. Non-x86 support from the branch_lut code 6. Rename functions 7. Require __SSE2__ on i386 Copyright (C) 2014 Milo Yip Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Which is based on: http://0x80.pl/snippets/asm/sse-utoa.c SSE: conversion integers to decimal representation Author: Wojciech Muła e-mail: wojciech_mula@poczta.onet.pl www: http://0x80.pl/ License: BSD initial release 2011-10-21 $Id$ */ #include "util/integer_to_string.hh" #include <cassert> #include <stdint.h> namespace util { namespace { const char gDigitsLut[200] = { '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' }; } // namespace // SSE2 implementation according to http://0x80.pl/articles/sse-itoa.html // Modifications: (1) fix incorrect digits (2) accept all ranges (3) write to user provided buffer. 
#if defined(__amd64) || defined(_M_X64) || (defined(__SSE2__) && (defined(_M_IX86) || defined(i386))) #include <emmintrin.h> #ifdef _MSC_VER #include "intrin.h" #endif #ifdef _MSC_VER #define ALIGN_PRE __declspec(align(16)) #define ALIGN_SUF #else #define ALIGN_PRE #define ALIGN_SUF __attribute__ ((aligned(16))) #endif namespace { static const uint32_t kDiv10000 = 0xd1b71759; ALIGN_PRE static const uint32_t kDiv10000Vector[4] ALIGN_SUF = { kDiv10000, kDiv10000, kDiv10000, kDiv10000 }; ALIGN_PRE static const uint32_t k10000Vector[4] ALIGN_SUF = { 10000, 10000, 10000, 10000 }; ALIGN_PRE static const uint16_t kDivPowersVector[8] ALIGN_SUF = { 8389, 5243, 13108, 32768, 8389, 5243, 13108, 32768 }; // 10^3, 10^2, 10^1, 10^0 ALIGN_PRE static const uint16_t kShiftPowersVector[8] ALIGN_SUF = { 1 << (16 - (23 + 2 - 16)), 1 << (16 - (19 + 2 - 16)), 1 << (16 - 1 - 2), 1 << (15), 1 << (16 - (23 + 2 - 16)), 1 << (16 - (19 + 2 - 16)), 1 << (16 - 1 - 2), 1 << (15) }; ALIGN_PRE static const uint16_t k10Vector[8] ALIGN_SUF = { 10, 10, 10, 10, 10, 10, 10, 10 }; ALIGN_PRE static const char kAsciiZero[16] ALIGN_SUF = { '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0' }; inline __m128i Convert8DigitsSSE2(uint32_t value) { assert(value <= 99999999); // abcd, efgh = abcdefgh divmod 10000 const __m128i abcdefgh = _mm_cvtsi32_si128(value); const __m128i abcd = _mm_srli_epi64(_mm_mul_epu32(abcdefgh, reinterpret_cast<const __m128i*>(kDiv10000Vector)[0]), 45); const __m128i efgh = _mm_sub_epi32(abcdefgh, _mm_mul_epu32(abcd, reinterpret_cast<const __m128i*>(k10000Vector)[0])); // v1 = [ abcd, efgh, 0, 0, 0, 0, 0, 0 ] const __m128i v1 = _mm_unpacklo_epi16(abcd, efgh); // v1a = v1 * 4 = [ abcd * 4, efgh * 4, 0, 0, 0, 0, 0, 0 ] const __m128i v1a = _mm_slli_epi64(v1, 2); // v2 = [ abcd * 4, abcd * 4, abcd * 4, abcd * 4, efgh * 4, efgh * 4, efgh * 4, efgh * 4 ] const __m128i v2a = _mm_unpacklo_epi16(v1a, v1a); const __m128i v2 = _mm_unpacklo_epi32(v2a, v2a); // v4 = v2 div 10^3, 10^2, 10^1, 10^0 = [ a, ab, abc, abcd, e, ef, efg, efgh ] const __m128i v3 = _mm_mulhi_epu16(v2, reinterpret_cast<const __m128i*>(kDivPowersVector)[0]); const __m128i v4 = _mm_mulhi_epu16(v3, reinterpret_cast<const __m128i*>(kShiftPowersVector)[0]); // v5 = v4 * 10 = [ a0, ab0, abc0, abcd0, e0, ef0, efg0, efgh0 ] const __m128i v5 = _mm_mullo_epi16(v4, reinterpret_cast<const __m128i*>(k10Vector)[0]); // v6 = v5 << 16 = [ 0, a0, ab0, abc0, 0, e0, ef0, efg0 ] const __m128i v6 = _mm_slli_epi64(v5, 16); // v7 = v4 - v6 = { a, b, c, d, e, f, g, h } const __m128i v7 = _mm_sub_epi16(v4, v6); return v7; } inline __m128i ShiftDigits_SSE2(__m128i a, unsigned digit) { assert(digit <= 8); switch (digit) { case 0: return a; case 1: return _mm_srli_si128(a, 1); case 2: return _mm_srli_si128(a, 2); case 3: return _mm_srli_si128(a, 3); case 4: return _mm_srli_si128(a, 4); case 5: return _mm_srli_si128(a, 5); case 6: return _mm_srli_si128(a, 6); case 7: return _mm_srli_si128(a, 7); case 8: return _mm_srli_si128(a, 8); } return a; // should not execute here. 
} } // namespace // Original name: u32toa_sse2 char *ToString(uint32_t value, char* buffer) { if (value < 10000) { const uint32_t d1 = (value / 100) << 1; const uint32_t d2 = (value % 100) << 1; if (value >= 1000) *buffer++ = gDigitsLut[d1]; if (value >= 100) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 10) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; //*buffer++ = '\0'; return buffer; } else if (value < 100000000) { // Experiment shows that this case SSE2 is slower #if 0 const __m128i a = Convert8DigitsSSE2(value); // Convert to bytes, add '0' const __m128i va = _mm_add_epi8(_mm_packus_epi16(a, _mm_setzero_si128()), reinterpret_cast<const __m128i*>(kAsciiZero)[0]); // Count number of digit const unsigned mask = _mm_movemask_epi8(_mm_cmpeq_epi8(va, reinterpret_cast<const __m128i*>(kAsciiZero)[0])); unsigned long digit; #ifdef _MSC_VER _BitScanForward(&digit, ~mask | 0x8000); #else digit = __builtin_ctz(~mask | 0x8000); #endif // Shift digits to the beginning __m128i result = ShiftDigits_SSE2(va, digit); //__m128i result = _mm_srl_epi64(va, _mm_cvtsi32_si128(digit * 8)); _mm_storel_epi64(reinterpret_cast<__m128i*>(buffer), result); buffer[8 - digit] = '\0'; #else // value = bbbbcccc const uint32_t b = value / 10000; const uint32_t c = value % 10000; const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; if (value >= 10000000) *buffer++ = gDigitsLut[d1]; if (value >= 1000000) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 100000) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; *buffer++ = gDigitsLut[d3]; *buffer++ = gDigitsLut[d3 + 1]; *buffer++ = gDigitsLut[d4]; *buffer++ = gDigitsLut[d4 + 1]; // *buffer++ = '\0'; return buffer; #endif } else { // value = aabbbbbbbb in decimal const uint32_t a = value / 100000000; // 1 to 42 value %= 100000000; if (a >= 10) { const unsigned i = a << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; } else *buffer++ = '0' + static_cast<char>(a); const __m128i b = Convert8DigitsSSE2(value); const __m128i ba = _mm_add_epi8(_mm_packus_epi16(_mm_setzero_si128(), b), reinterpret_cast<const __m128i*>(kAsciiZero)[0]); const __m128i result = _mm_srli_si128(ba, 8); _mm_storel_epi64(reinterpret_cast<__m128i*>(buffer), result); // buffer[8] = '\0'; return buffer + 8; } } // Original name: u64toa_sse2 char *ToString(uint64_t value, char* buffer) { if (value < 100000000) { uint32_t v = static_cast<uint32_t>(value); if (v < 10000) { const uint32_t d1 = (v / 100) << 1; const uint32_t d2 = (v % 100) << 1; if (v >= 1000) *buffer++ = gDigitsLut[d1]; if (v >= 100) *buffer++ = gDigitsLut[d1 + 1]; if (v >= 10) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; //*buffer++ = '\0'; return buffer; } else { // Experiment shows that this case SSE2 is slower #if 0 const __m128i a = Convert8DigitsSSE2(v); // Convert to bytes, add '0' const __m128i va = _mm_add_epi8(_mm_packus_epi16(a, _mm_setzero_si128()), reinterpret_cast<const __m128i*>(kAsciiZero)[0]); // Count number of digit const unsigned mask = _mm_movemask_epi8(_mm_cmpeq_epi8(va, reinterpret_cast<const __m128i*>(kAsciiZero)[0])); unsigned long digit; #ifdef _MSC_VER _BitScanForward(&digit, ~mask | 0x8000); #else digit = __builtin_ctz(~mask | 0x8000); #endif // Shift digits to the beginning __m128i result = ShiftDigits_SSE2(va, digit); _mm_storel_epi64(reinterpret_cast<__m128i*>(buffer), result); buffer[8 - digit] = '\0'; #else // value = bbbbcccc const uint32_t b = v / 10000; const uint32_t c = v 
% 10000; const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; if (value >= 10000000) *buffer++ = gDigitsLut[d1]; if (value >= 1000000) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 100000) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; *buffer++ = gDigitsLut[d3]; *buffer++ = gDigitsLut[d3 + 1]; *buffer++ = gDigitsLut[d4]; *buffer++ = gDigitsLut[d4 + 1]; //*buffer++ = '\0'; return buffer; #endif } } else if (value < 10000000000000000) { const uint32_t v0 = static_cast<uint32_t>(value / 100000000); const uint32_t v1 = static_cast<uint32_t>(value % 100000000); const __m128i a0 = Convert8DigitsSSE2(v0); const __m128i a1 = Convert8DigitsSSE2(v1); // Convert to bytes, add '0' const __m128i va = _mm_add_epi8(_mm_packus_epi16(a0, a1), reinterpret_cast<const __m128i*>(kAsciiZero)[0]); // Count number of digit const unsigned mask = _mm_movemask_epi8(_mm_cmpeq_epi8(va, reinterpret_cast<const __m128i*>(kAsciiZero)[0])); #ifdef _MSC_VER unsigned long digit; _BitScanForward(&digit, ~mask | 0x8000); #else unsigned digit = __builtin_ctz(~mask | 0x8000); #endif // Shift digits to the beginning __m128i result = ShiftDigits_SSE2(va, digit); _mm_storeu_si128(reinterpret_cast<__m128i*>(buffer), result); // buffer[16 - digit] = '\0'; return &buffer[16 - digit]; } else { const uint32_t a = static_cast<uint32_t>(value / 10000000000000000); // 1 to 1844 value %= 10000000000000000; if (a < 10) *buffer++ = '0' + static_cast<char>(a); else if (a < 100) { const uint32_t i = a << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; } else if (a < 1000) { *buffer++ = '0' + static_cast<char>(a / 100); const uint32_t i = (a % 100) << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; } else { const uint32_t i = (a / 100) << 1; const uint32_t j = (a % 100) << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; *buffer++ = gDigitsLut[j]; *buffer++ = gDigitsLut[j + 1]; } const uint32_t v0 = static_cast<uint32_t>(value / 100000000); const uint32_t v1 = static_cast<uint32_t>(value % 100000000); const __m128i a0 = Convert8DigitsSSE2(v0); const __m128i a1 = Convert8DigitsSSE2(v1); // Convert to bytes, add '0' const __m128i va = _mm_add_epi8(_mm_packus_epi16(a0, a1), reinterpret_cast<const __m128i*>(kAsciiZero)[0]); _mm_storeu_si128(reinterpret_cast<__m128i*>(buffer), va); // buffer[16] = '\0'; return &buffer[16]; } } #else // Generic Non-x86 case // Orignal name: u32toa_branchlut char *ToString(uint32_t value, char* buffer) { if (value < 10000) { const uint32_t d1 = (value / 100) << 1; const uint32_t d2 = (value % 100) << 1; if (value >= 1000) *buffer++ = gDigitsLut[d1]; if (value >= 100) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 10) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; } else if (value < 100000000) { // value = bbbbcccc const uint32_t b = value / 10000; const uint32_t c = value % 10000; const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; if (value >= 10000000) *buffer++ = gDigitsLut[d1]; if (value >= 1000000) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 100000) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; *buffer++ = gDigitsLut[d3]; *buffer++ = gDigitsLut[d3 + 1]; *buffer++ = gDigitsLut[d4]; *buffer++ = gDigitsLut[d4 + 1]; } else { // value = aabbbbcccc in decimal const uint32_t a = value / 100000000; // 1 to 42 value %= 100000000; if (a >= 10) { const unsigned i = a << 
1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; } else *buffer++ = '0' + static_cast<char>(a); const uint32_t b = value / 10000; // 0 to 9999 const uint32_t c = value % 10000; // 0 to 9999 const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; *buffer++ = gDigitsLut[d1]; *buffer++ = gDigitsLut[d1 + 1]; *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; *buffer++ = gDigitsLut[d3]; *buffer++ = gDigitsLut[d3 + 1]; *buffer++ = gDigitsLut[d4]; *buffer++ = gDigitsLut[d4 + 1]; } return buffer; //*buffer++ = '\0'; } // Original name: u64toa_branchlut char *ToString(uint64_t value, char* buffer) { if (value < 100000000) { uint32_t v = static_cast<uint32_t>(value); if (v < 10000) { const uint32_t d1 = (v / 100) << 1; const uint32_t d2 = (v % 100) << 1; if (v >= 1000) *buffer++ = gDigitsLut[d1]; if (v >= 100) *buffer++ = gDigitsLut[d1 + 1]; if (v >= 10) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; } else { // value = bbbbcccc const uint32_t b = v / 10000; const uint32_t c = v % 10000; const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; if (value >= 10000000) *buffer++ = gDigitsLut[d1]; if (value >= 1000000) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 100000) *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; *buffer++ = gDigitsLut[d3]; *buffer++ = gDigitsLut[d3 + 1]; *buffer++ = gDigitsLut[d4]; *buffer++ = gDigitsLut[d4 + 1]; } } else if (value < 10000000000000000) { const uint32_t v0 = static_cast<uint32_t>(value / 100000000); const uint32_t v1 = static_cast<uint32_t>(value % 100000000); const uint32_t b0 = v0 / 10000; const uint32_t c0 = v0 % 10000; const uint32_t d1 = (b0 / 100) << 1; const uint32_t d2 = (b0 % 100) << 1; const uint32_t d3 = (c0 / 100) << 1; const uint32_t d4 = (c0 % 100) << 1; const uint32_t b1 = v1 / 10000; const uint32_t c1 = v1 % 10000; const uint32_t d5 = (b1 / 100) << 1; const uint32_t d6 = (b1 % 100) << 1; const uint32_t d7 = (c1 / 100) << 1; const uint32_t d8 = (c1 % 100) << 1; if (value >= 1000000000000000) *buffer++ = gDigitsLut[d1]; if (value >= 100000000000000) *buffer++ = gDigitsLut[d1 + 1]; if (value >= 10000000000000) *buffer++ = gDigitsLut[d2]; if (value >= 1000000000000) *buffer++ = gDigitsLut[d2 + 1]; if (value >= 100000000000) *buffer++ = gDigitsLut[d3]; if (value >= 10000000000) *buffer++ = gDigitsLut[d3 + 1]; if (value >= 1000000000) *buffer++ = gDigitsLut[d4]; if (value >= 100000000) *buffer++ = gDigitsLut[d4 + 1]; *buffer++ = gDigitsLut[d5]; *buffer++ = gDigitsLut[d5 + 1]; *buffer++ = gDigitsLut[d6]; *buffer++ = gDigitsLut[d6 + 1]; *buffer++ = gDigitsLut[d7]; *buffer++ = gDigitsLut[d7 + 1]; *buffer++ = gDigitsLut[d8]; *buffer++ = gDigitsLut[d8 + 1]; } else { const uint32_t a = static_cast<uint32_t>(value / 10000000000000000); // 1 to 1844 value %= 10000000000000000; if (a < 10) *buffer++ = '0' + static_cast<char>(a); else if (a < 100) { const uint32_t i = a << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; } else if (a < 1000) { *buffer++ = '0' + static_cast<char>(a / 100); const uint32_t i = (a % 100) << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; } else { const uint32_t i = (a / 100) << 1; const uint32_t j = (a % 100) << 1; *buffer++ = gDigitsLut[i]; *buffer++ = gDigitsLut[i + 1]; *buffer++ = gDigitsLut[j]; *buffer++ = gDigitsLut[j + 1]; } const uint32_t v0 = static_cast<uint32_t>(value / 100000000); 
const uint32_t v1 = static_cast<uint32_t>(value % 100000000); const uint32_t b0 = v0 / 10000; const uint32_t c0 = v0 % 10000; const uint32_t d1 = (b0 / 100) << 1; const uint32_t d2 = (b0 % 100) << 1; const uint32_t d3 = (c0 / 100) << 1; const uint32_t d4 = (c0 % 100) << 1; const uint32_t b1 = v1 / 10000; const uint32_t c1 = v1 % 10000; const uint32_t d5 = (b1 / 100) << 1; const uint32_t d6 = (b1 % 100) << 1; const uint32_t d7 = (c1 / 100) << 1; const uint32_t d8 = (c1 % 100) << 1; *buffer++ = gDigitsLut[d1]; *buffer++ = gDigitsLut[d1 + 1]; *buffer++ = gDigitsLut[d2]; *buffer++ = gDigitsLut[d2 + 1]; *buffer++ = gDigitsLut[d3]; *buffer++ = gDigitsLut[d3 + 1]; *buffer++ = gDigitsLut[d4]; *buffer++ = gDigitsLut[d4 + 1]; *buffer++ = gDigitsLut[d5]; *buffer++ = gDigitsLut[d5 + 1]; *buffer++ = gDigitsLut[d6]; *buffer++ = gDigitsLut[d6 + 1]; *buffer++ = gDigitsLut[d7]; *buffer++ = gDigitsLut[d7 + 1]; *buffer++ = gDigitsLut[d8]; *buffer++ = gDigitsLut[d8 + 1]; } return buffer; } #endif // End of architecture if statement. // Signed wrappers. The negation is done on the unsigned version because // doing so has defined behavior for INT_MIN. char *ToString(int32_t value, char *to) { uint32_t un = static_cast<uint32_t>(value); if (value < 0) { *to++ = '-'; un = -un; } return ToString(un, to); } char *ToString(int64_t value, char *to) { uint64_t un = static_cast<uint64_t>(value); if (value < 0) { *to++ = '-'; un = -un; } return ToString(un, to); } // No optimization for this case yet. char *ToString(int16_t value, char *to) { return ToString((int32_t)value, to); } char *ToString(uint16_t value, char *to) { return ToString((uint32_t)value, to); } // void * to string. This hasn't been optimized at all really. namespace { const char kHexDigits[] = "0123456789abcdef"; } // namespace char *ToString(const void *v, char *to) { *to++ = '0'; *to++ = 'x'; // Fun fact: gcc/clang boost::lexical_cast on Linux do just "0" while clang on OS X does "0x0" // I happen to prefer 0x0. if (!v) { *to++ = '0'; return to; } uintptr_t value = reinterpret_cast<uintptr_t>(v); uint8_t shift = sizeof(void*) * 8 - 4; for (; !(value >> shift); shift -= 4) {} for (; ; shift -= 4) { *to++ = kHexDigits[(value >> shift) & 0xf]; if (!shift) break; } return to; } } // namespace util
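// Editor's note: a small usage sketch, not part of the original file. As the header comment notes,
// these routines return the end of the written range instead of null-terminating.
#include <cstdint>
#include <string>
#include "util/integer_to_string.hh"

std::string FormatUnsigned(uint64_t value) {
  char buf[32];                              // more than enough for 20 decimal digits
  char *end = util::ToString(value, buf);    // writes the digits, returns one past the last one
  return std::string(buf, end);              // the caller decides how to terminate or copy
}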
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_14x_multiarchpkg-win-amd64-opt.yml
build: template_file: test-win-opt-base.tyml dependencies: - "node-package-cpu" - "test-training_16k-linux-amd64-py36m-opt" test_model_task: "test-training_16k-linux-amd64-py36m-opt" system_setup: > ${system.sox_win} && ${nodejs.win.prep_14} args: tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-node-tests.sh 14.x 16k" metadata: name: "DeepSpeech Windows AMD64 CPU NodeJS MultiArch Package 14.x tests" description: "Testing DeepSpeech for Windows/AMD64 on NodeJS MultiArch Package v14.x, CPU only, optimized version"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/lexicographic-weight.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Lexicographic weight set and associated semiring operation definitions. // // A lexicographic weight is a sequence of weights, each of which must have the // path property and Times() must be (strongly) cancellative // (for all a,b,c != Zero(): Times(c, a) = Times(c, b) => a = b, // Times(a, c) = Times(b, c) => a = b). // The + operation on two weights a and b is the lexicographically // prior of a and b. #ifndef FST_LEXICOGRAPHIC_WEIGHT_H_ #define FST_LEXICOGRAPHIC_WEIGHT_H_ #include <cstdlib> #include <string> #include <fst/log.h> #include <fst/pair-weight.h> #include <fst/weight.h> namespace fst { template <class W1, class W2> class LexicographicWeight : public PairWeight<W1, W2> { public: using ReverseWeight = LexicographicWeight<typename W1::ReverseWeight, typename W2::ReverseWeight>; using PairWeight<W1, W2>::Value1; using PairWeight<W1, W2>::Value2; using PairWeight<W1, W2>::SetValue1; using PairWeight<W1, W2>::SetValue2; using PairWeight<W1, W2>::Zero; using PairWeight<W1, W2>::One; using PairWeight<W1, W2>::NoWeight; using PairWeight<W1, W2>::Quantize; using PairWeight<W1, W2>::Reverse; LexicographicWeight() {} explicit LexicographicWeight(const PairWeight<W1, W2> &w) : PairWeight<W1, W2>(w) {} LexicographicWeight(W1 w1, W2 w2) : PairWeight<W1, W2>(w1, w2) { if ((W1::Properties() & kPath) != kPath) { FSTERROR() << "LexicographicWeight must " << "have the path property: " << W1::Type(); SetValue1(W1::NoWeight()); } if ((W2::Properties() & kPath) != kPath) { FSTERROR() << "LexicographicWeight must " << "have the path property: " << W2::Type(); SetValue2(W2::NoWeight()); } } static const LexicographicWeight &Zero() { static const LexicographicWeight zero(PairWeight<W1, W2>::Zero()); return zero; } static const LexicographicWeight &One() { static const LexicographicWeight one(PairWeight<W1, W2>::One()); return one; } static const LexicographicWeight &NoWeight() { static const LexicographicWeight no_weight(PairWeight<W1, W2>::NoWeight()); return no_weight; } static const string &Type() { static const string *const type = new string(W1::Type() + "_LT_" + W2::Type()); return *type; } bool Member() const { if (!Value1().Member() || !Value2().Member()) return false; // Lexicographic weights cannot mix zeroes and non-zeroes. 
if (Value1() == W1::Zero() && Value2() == W2::Zero()) return true; if (Value1() != W1::Zero() && Value2() != W2::Zero()) return true; return false; } LexicographicWeight Quantize(float delta = kDelta) const { return LexicographicWeight(PairWeight<W1, W2>::Quantize()); } ReverseWeight Reverse() const { return ReverseWeight(PairWeight<W1, W2>::Reverse()); } static constexpr uint64_t Properties() { return W1::Properties() & W2::Properties() & (kLeftSemiring | kRightSemiring | kPath | kIdempotent | kCommutative); } }; template <class W1, class W2> inline LexicographicWeight<W1, W2> Plus(const LexicographicWeight<W1, W2> &w, const LexicographicWeight<W1, W2> &v) { if (!w.Member() || !v.Member()) { return LexicographicWeight<W1, W2>::NoWeight(); } NaturalLess<W1> less1; NaturalLess<W2> less2; if (less1(w.Value1(), v.Value1())) return w; if (less1(v.Value1(), w.Value1())) return v; if (less2(w.Value2(), v.Value2())) return w; if (less2(v.Value2(), w.Value2())) return v; return w; } template <class W1, class W2> inline LexicographicWeight<W1, W2> Times(const LexicographicWeight<W1, W2> &w, const LexicographicWeight<W1, W2> &v) { return LexicographicWeight<W1, W2>(Times(w.Value1(), v.Value1()), Times(w.Value2(), v.Value2())); } template <class W1, class W2> inline LexicographicWeight<W1, W2> Divide(const LexicographicWeight<W1, W2> &w, const LexicographicWeight<W1, W2> &v, DivideType typ = DIVIDE_ANY) { return LexicographicWeight<W1, W2>(Divide(w.Value1(), v.Value1(), typ), Divide(w.Value2(), v.Value2(), typ)); } // This function object generates weights by calling the underlying generators // for the templated weight types, like all other pair weight types. However, // for lexicographic weights, we cannot generate zeroes for the two subweights // separately: weights are members iff both members are zero or both members // are non-zero. This is intended primarily for testing. template <class W1, class W2> class WeightGenerate<LexicographicWeight<W1, W2>> { public: using Weight = LexicographicWeight<W1, W1>; using Generate1 = WeightGenerate<W1>; using Generate2 = WeightGenerate<W2>; explicit WeightGenerate(bool allow_zero = true, size_t num_random_weights = kNumRandomWeights) : generator1_(false, num_random_weights), generator2_(false, num_random_weights), allow_zero_(allow_zero), num_random_weights_(num_random_weights) {} Weight operator()() const { if (allow_zero_) { const int n = rand() % (num_random_weights_ + 1); // NOLINT if (n == num_random_weights_) return Weight(W1::Zero(), W2::Zero()); } return Weight(generator1_(), generator2_()); } private: const Generate1 generator1_; const Generate2 generator2_; // Permits Zero() and zero divisors. const bool allow_zero_; // The number of alternative random weights. const size_t num_random_weights_; }; } // namespace fst #endif // FST_LEXICOGRAPHIC_WEIGHT_H_
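// Editor's note: a short illustrative sketch, not part of the original header, of the semiring
// operations defined above, using TropicalWeight (which has the required path property) for both
// components.
#include <fst/float-weight.h>
#include <fst/lexicographic-weight.h>

void LexicographicExample() {
  using LW = fst::LexicographicWeight<fst::TropicalWeight, fst::TropicalWeight>;
  const LW a(1.0, 5.0);
  const LW b(1.0, 3.0);
  // Plus() returns the lexicographically prior operand: the first components tie, and 3.0
  // precedes 5.0 in the tropical natural order, so the sum is b.
  const LW sum = Plus(a, b);    // (1, 3)
  // Times() is componentwise; tropical Times adds the values.
  const LW prod = Times(a, b);  // (2, 8)
  (void)sum;
  (void)prod;
}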
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/lib/symbol-table.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Classes to provide symbol-to-integer and integer-to-symbol mappings. #include <fst/symbol-table.h> #include <fst/flags.h> #include <fst/log.h> #include <fstream> #include <fst/util.h> DEFINE_bool(fst_compat_symbols, true, "Require symbol tables to match when appropriate"); DEFINE_string(fst_field_separator, "\t ", "Set of characters used as a separator between printed fields"); namespace fst { SymbolTableTextOptions::SymbolTableTextOptions(bool allow_negative_labels) : allow_negative_labels(allow_negative_labels), fst_field_separator(FLAGS_fst_field_separator) {} namespace internal { // Maximum line length in textual symbols file. static constexpr int kLineLen = 8096; // Identifies stream data as a symbol table (and its endianity). static constexpr int32_t kSymbolTableMagicNumber = 2125658996; DenseSymbolMap::DenseSymbolMap() : empty_(-1), buckets_(1 << 4), hash_mask_(buckets_.size() - 1) { std::uninitialized_fill(buckets_.begin(), buckets_.end(), empty_); } DenseSymbolMap::DenseSymbolMap(const DenseSymbolMap &other) : empty_(-1), symbols_(other.symbols_), buckets_(other.buckets_), hash_mask_(other.hash_mask_) {} std::pair<int64_t, bool> DenseSymbolMap::InsertOrFind(const string &key) { static constexpr float kMaxOccupancyRatio = 0.75; // Grows when 75% full. if (Size() >= kMaxOccupancyRatio * buckets_.size()) { Rehash(buckets_.size() * 2); } size_t idx = str_hash_(key) & hash_mask_; while (buckets_[idx] != empty_) { const auto stored_value = buckets_[idx]; if (symbols_[stored_value] == key) return {stored_value, false}; idx = (idx + 1) & hash_mask_; } const auto next = Size(); buckets_[idx] = next; symbols_.emplace_back(key); return {next, true}; } int64_t DenseSymbolMap::Find(const string &key) const { size_t idx = str_hash_(key) & hash_mask_; while (buckets_[idx] != empty_) { const auto stored_value = buckets_[idx]; if (symbols_[stored_value] == key) return stored_value; idx = (idx + 1) & hash_mask_; } return buckets_[idx]; } void DenseSymbolMap::Rehash(size_t num_buckets) { buckets_.resize(num_buckets); hash_mask_ = buckets_.size() - 1; std::uninitialized_fill(buckets_.begin(), buckets_.end(), empty_); for (size_t i = 0; i < Size(); ++i) { size_t idx = str_hash_(string(symbols_[i])) & hash_mask_; while (buckets_[idx] != empty_) { idx = (idx + 1) & hash_mask_; } buckets_[idx] = i; } } void DenseSymbolMap::RemoveSymbol(size_t idx) { symbols_.erase(symbols_.begin() + idx); Rehash(buckets_.size()); } SymbolTableImpl *SymbolTableImpl::ReadText(std::istream &strm, const string &filename, const SymbolTableTextOptions &opts) { std::unique_ptr<SymbolTableImpl> impl(new SymbolTableImpl(filename)); int64_t nline = 0; char line[kLineLen]; while (!strm.getline(line, kLineLen).fail()) { ++nline; std::vector<char *> col; const auto separator = opts.fst_field_separator + "\n"; SplitString(line, separator.c_str(), &col, true); if (col.empty()) continue; // Empty line. 
if (col.size() != 2) { LOG(ERROR) << "SymbolTable::ReadText: Bad number of columns (" << col.size() << "), " << "file = " << filename << ", line = " << nline << ":<" << line << ">"; return nullptr; } const char *symbol = col[0]; const char *value = col[1]; char *p; const auto key = strtoll(value, &p, 10); if (p < value + strlen(value) || (!opts.allow_negative_labels && key < 0) || key == kNoSymbol) { LOG(ERROR) << "SymbolTable::ReadText: Bad non-negative integer \"" << value << "\", " << "file = " << filename << ", line = " << nline; return nullptr; } impl->AddSymbol(symbol, key); } return impl.release(); } void SymbolTableImpl::MaybeRecomputeCheckSum() const { { ReaderMutexLock check_sum_lock(&check_sum_mutex_); if (check_sum_finalized_) return; } // We'll acquire an exclusive lock to recompute the checksums. MutexLock check_sum_lock(&check_sum_mutex_); if (check_sum_finalized_) { // Another thread (coming in around the same time return; // might have done it already). So we recheck. } // Calculates the original label-agnostic checksum. CheckSummer check_sum; for (size_t i = 0; i < symbols_.Size(); ++i) { const auto &symbol = symbols_.GetSymbol(i); check_sum.Update(symbol.data(), symbol.size()); check_sum.Update("", 1); } check_sum_string_ = check_sum.Digest(); // Calculates the safer, label-dependent checksum. CheckSummer labeled_check_sum; for (int64_t i = 0; i < dense_key_limit_; ++i) { std::ostringstream line; line << symbols_.GetSymbol(i) << '\t' << i; labeled_check_sum.Update(line.str().data(), line.str().size()); } using citer = map<int64_t, int64_t>::const_iterator; for (citer it = key_map_.begin(); it != key_map_.end(); ++it) { // TODO(tombagby, 2013-11-22) This line maintains a bug that ignores // negative labels in the checksum that too many tests rely on. if (it->first < dense_key_limit_) continue; std::ostringstream line; line << symbols_.GetSymbol(it->second) << '\t' << it->first; labeled_check_sum.Update(line.str().data(), line.str().size()); } labeled_check_sum_string_ = labeled_check_sum.Digest(); check_sum_finalized_ = true; } int64_t SymbolTableImpl::AddSymbol(const string &symbol, int64_t key) { if (key == kNoSymbol) return key; const auto insert_key = symbols_.InsertOrFind(symbol); if (!insert_key.second) { const auto key_already = GetNthKey(insert_key.first); if (key_already == key) return key; VLOG(1) << "SymbolTable::AddSymbol: symbol = " << symbol << " already in symbol_map_ with key = " << key_already << " but supplied new key = " << key << " (ignoring new key)"; return key_already; } if (key == (symbols_.Size() - 1) && key == dense_key_limit_) { ++dense_key_limit_; } else { idx_key_.push_back(key); key_map_[key] = symbols_.Size() - 1; } if (key >= available_key_) available_key_ = key + 1; check_sum_finalized_ = false; return key; } // TODO(rybach): Consider a more efficient implementation which re-uses holes in // the dense-key range or re-arranges the dense-key range from time to time. void SymbolTableImpl::RemoveSymbol(const int64_t key) { auto idx = key; if (key < 0 || key >= dense_key_limit_) { auto iter = key_map_.find(key); if (iter == key_map_.end()) return; idx = iter->second; key_map_.erase(iter); } if (idx < 0 || idx >= symbols_.Size()) return; symbols_.RemoveSymbol(idx); // Removed one symbol, all indexes > idx are shifted by -1. for (auto &k : key_map_) { if (k.second > idx) --k.second; } if (key >= 0 && key < dense_key_limit_) { // Removal puts a hole in the dense key range. Adjusts range to [0, key). 
const auto new_dense_key_limit = key; for (int64_t i = key + 1; i < dense_key_limit_; ++i) { key_map_[i] = i - 1; } // Moves existing values in idx_key to new place. idx_key_.resize(symbols_.Size() - new_dense_key_limit); for (int64_t i = symbols_.Size(); i >= dense_key_limit_; --i) { idx_key_[i - new_dense_key_limit - 1] = idx_key_[i - dense_key_limit_]; } // Adds indexes for previously dense keys. for (int64_t i = new_dense_key_limit; i < dense_key_limit_ - 1; ++i) { idx_key_[i - new_dense_key_limit] = i + 1; } dense_key_limit_ = new_dense_key_limit; } else { // Remove entry for removed index in idx_key. for (int64_t i = idx - dense_key_limit_; i < idx_key_.size() - 1; ++i) { idx_key_[i] = idx_key_[i + 1]; } idx_key_.pop_back(); } if (key == available_key_ - 1) available_key_ = key; } SymbolTableImpl *SymbolTableImpl::Read(std::istream &strm, const SymbolTableReadOptions &opts) { int32_t magic_number = 0; ReadType(strm, &magic_number); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Read: Read failed"; return nullptr; } string name; ReadType(strm, &name); std::unique_ptr<SymbolTableImpl> impl(new SymbolTableImpl(name)); ReadType(strm, &impl->available_key_); int64_t size; ReadType(strm, &size); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Read: Read failed"; return nullptr; } string symbol; int64_t key; impl->check_sum_finalized_ = false; for (int64_t i = 0; i < size; ++i) { ReadType(strm, &symbol); ReadType(strm, &key); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Read: Read failed"; return nullptr; } impl->AddSymbol(symbol, key); } return impl.release(); } bool SymbolTableImpl::Write(std::ostream &strm) const { WriteType(strm, kSymbolTableMagicNumber); WriteType(strm, name_); WriteType(strm, available_key_); const int64_t size = symbols_.Size(); WriteType(strm, size); for (int64_t i = 0; i < size; ++i) { auto key = (i < dense_key_limit_) ? i : idx_key_[i - dense_key_limit_]; WriteType(strm, symbols_.GetSymbol(i)); WriteType(strm, key); } strm.flush(); if (strm.fail()) { LOG(ERROR) << "SymbolTable::Write: Write failed"; return false; } return true; } } // namespace internal void SymbolTable::AddTable(const SymbolTable &table) { MutateCheck(); for (SymbolTableIterator iter(table); !iter.Done(); iter.Next()) { impl_->AddSymbol(iter.Symbol()); } } bool SymbolTable::WriteText(std::ostream &strm, const SymbolTableTextOptions &opts) const { if (opts.fst_field_separator.empty()) { LOG(ERROR) << "Missing required field separator"; return false; } bool once_only = false; for (SymbolTableIterator iter(*this); !iter.Done(); iter.Next()) { std::ostringstream line; if (iter.Value() < 0 && !opts.allow_negative_labels && !once_only) { LOG(WARNING) << "Negative symbol table entry when not allowed"; once_only = true; } line << iter.Symbol() << opts.fst_field_separator[0] << iter.Value() << '\n'; strm.write(line.str().data(), line.str().length()); } return true; } bool CompatSymbols(const SymbolTable *syms1, const SymbolTable *syms2, bool warning) { // Flag can explicitly override this check. if (!FLAGS_fst_compat_symbols) return true; if (syms1 && syms2 && (syms1->LabeledCheckSum() != syms2->LabeledCheckSum())) { if (warning) { LOG(WARNING) << "CompatSymbols: Symbol table checksums do not match. 
" << "Table sizes are " << syms1->NumSymbols() << " and " << syms2->NumSymbols(); } return false; } else { return true; } } void SymbolTableToString(const SymbolTable *table, string *result) { std::ostringstream ostrm; table->Write(ostrm); *result = ostrm.str(); } SymbolTable *StringToSymbolTable(const string &str) { std::istringstream istrm(str); return SymbolTable::Read(istrm, SymbolTableReadOptions()); } } // namespace fst
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/tts_tests/test_tacotron2_train.py
import glob import json import os import shutil from trainer import get_last_checkpoint from tests import get_device_id, get_tests_output_path, run_cli from TTS.tts.configs.tacotron2_config import Tacotron2Config config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") config = Tacotron2Config( r=5, batch_size=8, eval_batch_size=8, num_loader_workers=0, num_eval_loader_workers=0, text_cleaner="english_cleaners", use_phonemes=False, phoneme_language="en-us", phoneme_cache_path=os.path.join(get_tests_output_path(), "train_outputs/phoneme_cache/"), run_eval=True, test_delay_epochs=-1, epochs=1, print_step=1, test_sentences=[ "Be a voice, not an echo.", ], print_eval=True, max_decoder_steps=50, ) config.audio.do_trim_silence = True config.audio.trim_db = 60 config.save_json(config_path) # train the model for one epoch command_train = ( f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} " f"--coqpit.output_path {output_path} " "--coqpit.datasets.0.formatter ljspeech " "--coqpit.datasets.0.meta_file_train metadata.csv " "--coqpit.datasets.0.meta_file_val metadata.csv " "--coqpit.datasets.0.path tests/data/ljspeech " "--coqpit.test_delay_epochs 0 " ) run_cli(command_train) # Find latest folder continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) # Inference using TTS API continue_config_path = os.path.join(continue_path, "config.json") continue_restore_path, _ = get_last_checkpoint(continue_path) out_wav_path = os.path.join(get_tests_output_path(), "output.wav") # Check integrity of the config with open(continue_config_path, "r", encoding="utf-8") as f: config_loaded = json.load(f) assert config_loaded["characters"] is not None assert config_loaded["output_path"] in continue_path assert config_loaded["test_delay_epochs"] == 0 # Load the model and run inference inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}" run_cli(inference_command) # restore the model and continue training for one more epoch command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} " run_cli(command_train) shutil.rmtree(continue_path)
0
coqui_public_repos/STT-models/romansh-sursilvan/itml
coqui_public_repos/STT-models/romansh-sursilvan/itml/v0.1.0/alphabet.txt
' a b c d e f g h i j k l m n o p q r s t u v w x y z à ä è é î ò ö ü
0
coqui_public_repos/STT-models/amharic/itml
coqui_public_repos/STT-models/amharic/itml/v0.1.0/LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. 
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
0
coqui_public_repos/STT
coqui_public_repos/STT/data/alphabet.txt
# Each line in this file represents the Unicode codepoint (UTF-8 encoded) # associated with a numeric label. # A line that starts with # is a comment. You can escape it with \# if you wish # to use '#' as a label. a b c d e f g h i j k l m n o p q r s t u v w x y z ' # The last (non-comment) line needs to end with a newline.
0
coqui_public_repos/STT-models/german/aashishag
coqui_public_repos/STT-models/german/aashishag/v0.9.0/alphabet.txt
# Each line in this file represents the Unicode codepoint (UTF-8 encoded) # associated with a numeric label. # A line that starts with # is a comment. You can escape it with \# if you wish # to use '#' as a label. a b c d e f g h i j k l m n o p q r s t u v w x y z ä ö ü ' # The last (non-comment) line needs to end with a newline.
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-python_35-darwin-amd64-opt.yml
build: template_file: test-darwin-opt-base.tyml dependencies: - "darwin-amd64-cpu-opt" - "test-training_16k-linux-amd64-py36m-opt" - "homebrew_tests-darwin-amd64" test_model_task: "test-training_16k-linux-amd64-py36m-opt" args: tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.5.8:m 16k" metadata: name: "DeepSpeech OSX AMD64 CPU Python v3.5 tests" description: "Testing DeepSpeech for OSX/AMD64 on Python v3.5, CPU only, optimized version"
0
coqui_public_repos/Trainer
coqui_public_repos/Trainer/trainer/torch.py
import numpy as np import torch from torch.utils.data.distributed import DistributedSampler class DistributedSamplerWrapper(DistributedSampler): """Wrapper over Sampler for distributed training. It allows you to use any sampler in distributed mode. It is especially useful in conjunction with torch.nn.parallel.DistributedDataParallel. In such a case, each process can pass a torch.utils.data.DistributedSampler instance as a torch.utils.data.DataLoader sampler, and load a subset of the original dataset that is exclusive to it. .. note: Dataset is assumed to be of constant size. Args: sampler: Sampler used for subsampling. num_replicas (int, optional): Number of processes participating in distributed training. By default, world_size is retrieved from the current distributed group. rank (int, optional): Rank of the current process within num_replicas. By default, rank is retrieved from the current distributed group. shuffle (bool, optional): If True, sampler will shuffle the indices. Default: True. seed (int, optional): random seed used to shuffle the sampler if shuffle=True. This number should be identical across all processes in the distributed group. Default: 0. Reference: https://github.com/pytorch/pytorch/issues/23430 """ def __init__( self, sampler, num_replicas: int = None, rank: int = None, shuffle: bool = True, seed: int = 0, ): super().__init__( sampler, num_replicas=num_replicas, rank=rank, shuffle=shuffle, seed=seed, ) def __iter__(self): indices = list(self.dataset)[: self.total_size] # Add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size, f"{len(indices)} != {self.total_size}" # Subsample offset = self.num_samples * self.rank indices = indices[offset : offset + self.num_samples] assert len(indices) == self.num_samples, f"{len(indices)} != {self.num_samples}" return iter(indices) def set_epoch(self, epoch): super().set_epoch(epoch) if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) elif hasattr(self.dataset, "generator"): self.dataset.generator = torch.Generator().manual_seed(self.seed + epoch) def state_dict(self): return self.dataset.state_dict() def load_state_dict(self, state_dict): self.dataset.load_state_dict(state_dict) # pylint: disable=protected-access class NoamLR(torch.optim.lr_scheduler._LRScheduler): def __init__(self, optimizer, warmup_steps=0.1, last_epoch=-1): self.warmup_steps = float(warmup_steps) super().__init__(optimizer, last_epoch) def get_lr(self): step = max(self.last_epoch, 1) return [ base_lr * self.warmup_steps**0.5 * min(step * self.warmup_steps**-1.5, step**-0.5) for base_lr in self.base_lrs ] # pylint: disable=protected-access class StepwiseGradualLR(torch.optim.lr_scheduler._LRScheduler): """Hardcoded step-wise learning rate scheduling. 
Necessary for CapacitronVAE""" def __init__(self, optimizer, gradual_learning_rates, last_epoch=-1): self.gradual_learning_rates = gradual_learning_rates super().__init__(optimizer, last_epoch) def get_lr(self): step = max(self.last_epoch, 1) step_thresholds = [] rates = [] for values in self.gradual_learning_rates: step_thresholds.append(values[0]) rates.append(values[1]) boolean_indeces = np.less_equal(step_thresholds, step) try: last_true = np.where(boolean_indeces == True)[0][-1] # pylint: disable=singleton-comparison except IndexError: # For the steps larger than the last step in the list pass lr = rates[np.max(last_true, 0)] # Return last lr if step is above the set threshold lr = rates[-1] if step > step_thresholds[-1] else lr # Return first lr if step is below the second threshold - first is initial lr lr = rates[0] if step < step_thresholds[1] else lr return np.tile(lr, len(self.base_lrs)) # hack?
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/fft2d/fft.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Declarations for 1D FFT routines in third_party/fft2d/fft2d. #ifndef FFT2D_FFT_H__ #define FFT2D_FFT_H__ #ifdef __cplusplus extern "C" { #endif extern void cdft(int, int, double *, int *, double *); extern void rdft(int, int, double *, int *, double *); extern void ddct(int, int, double *, int *, double *); extern void ddst(int, int, double *, int *, double *); extern void dfct(int, double *, double *, int *, double *); extern void dfst(int, double *, double *, int *, double *); #ifdef __cplusplus } #endif #endif // FFT2D_FFT_H__
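These declarations come from Ooura's fft2d package, and the header itself does not restate the calling convention. The sketch below follows the conventions in Ooura's documentation (isgn = +1/-1, ip[0] = 0 before the first call, ip of length at least 2 + sqrt(n/2), w of length n/2, and a 2/n rescale after the inverse transform); treat those details, and the include path, as assumptions rather than something stated in this file.

```cpp
// Sketch: round-tripping a real sequence through rdft(). Work-array sizes and
// scaling follow Ooura's fft2d docs (assumed, not documented in this header).
#include <cmath>
#include <vector>
#include "fft.h"  // assumed include path for the declarations above

int main() {
  const int n = 8;  // must be a power of two
  std::vector<double> a = {1, 2, 3, 4, 4, 3, 2, 1};
  std::vector<int> ip(2 + static_cast<int>(std::sqrt(n / 2.0)) + 1, 0);
  std::vector<double> w(n / 2);
  ip[0] = 0;  // signals rdft to (re)build its trig tables on the first call
  rdft(n, 1, a.data(), ip.data(), w.data());   // forward transform, in place
  rdft(n, -1, a.data(), ip.data(), w.data());  // inverse transform
  for (double &x : a) x *= 2.0 / n;            // undo the transform's scaling
  return 0;                                    // a is back to its input values
}
```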
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/union.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions and classes to compute the union of two FSTs. #ifndef FST_UNION_H_ #define FST_UNION_H_ #include <algorithm> #include <vector> #include <fst/mutable-fst.h> #include <fst/rational.h> namespace fst { // Computes the union (sum) of two FSTs. This version writes the union to an // output MutableFst. If A transduces string x to y with weight a and B // transduces string w to v with weight b, then their union transduces x to y // with weight a and w to v with weight b. // // Complexity: // // Time: (V_2 + E_2) // Space: O(V_2 + E_2) // // where Vi is the number of states, and Ei is the number of arcs, in the ith // FST. template <class Arc> void Union(MutableFst<Arc> *fst1, const Fst<Arc> &fst2) { using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; // Checks for symbol table compatibility. if (!CompatSymbols(fst1->InputSymbols(), fst2.InputSymbols()) || !CompatSymbols(fst1->OutputSymbols(), fst2.OutputSymbols())) { FSTERROR() << "Union: Input/output symbol tables of 1st argument " << "do not match input/output symbol tables of 2nd argument"; fst1->SetProperties(kError, kError); return; } const auto numstates1 = fst1->NumStates(); const bool initial_acyclic1 = fst1->Properties(kInitialAcyclic, true); const auto props1 = fst1->Properties(kFstProperties, false); const auto props2 = fst2.Properties(kFstProperties, false); const auto start2 = fst2.Start(); if (start2 == kNoStateId) { if (props2 & kError) fst1->SetProperties(kError, kError); return; } if (fst2.Properties(kExpanded, false)) { fst1->ReserveStates(numstates1 + CountStates(fst2) + (initial_acyclic1 ? 0 : 1)); } for (StateIterator<Fst<Arc>> siter(fst2); !siter.Done(); siter.Next()) { const auto s1 = fst1->AddState(); const auto s2 = siter.Value(); fst1->SetFinal(s1, fst2.Final(s2)); fst1->ReserveArcs(s1, fst2.NumArcs(s2)); for (ArcIterator<Fst<Arc>> aiter(fst2, s2); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); // Copy intended. arc.nextstate += numstates1; fst1->AddArc(s1, arc); } } const auto start1 = fst1->Start(); if (start1 == kNoStateId) { fst1->SetStart(start2); fst1->SetProperties(props2, kCopyProperties); return; } if (initial_acyclic1) { fst1->AddArc(start1, Arc(0, 0, Weight::One(), start2 + numstates1)); } else { const auto nstart1 = fst1->AddState(); fst1->SetStart(nstart1); fst1->AddArc(nstart1, Arc(0, 0, Weight::One(), start1)); fst1->AddArc(nstart1, Arc(0, 0, Weight::One(), start2 + numstates1)); } fst1->SetProperties(UnionProperties(props1, props2), kFstProperties); } // Computes the union of two FSTs, modifying the RationalFst argument. template <class Arc> void Union(RationalFst<Arc> *fst1, const Fst<Arc> &fst2) { fst1->GetMutableImpl()->AddUnion(fst2); } using UnionFstOptions = RationalFstOptions; // Computes the union (sum) of two FSTs. This version is a delayed FST. If A // transduces string x to y with weight a and B transduces string w to v with // weight b, then their union transduces x to y with weight a and w to v with // weight b. // // Complexity: // // Time: O(v_1 + e_1 + v_2 + e_2) // Space: O(v_1 + v_2) // // where vi is the number of states visited, and ei is the number of arcs // visited, in the ith FST. Constant time and space to visit an input state or // arc is assumed and exclusive of caching. 
template <class A> class UnionFst : public RationalFst<A> { public: using Arc = A; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; UnionFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2) { GetMutableImpl()->InitUnion(fst1, fst2); } UnionFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2, const UnionFstOptions &opts) : RationalFst<Arc>(opts) { GetMutableImpl()->InitUnion(fst1, fst2); } // See Fst<>::Copy() for doc. UnionFst(const UnionFst<Arc> &fst, bool safe = false) : RationalFst<Arc>(fst, safe) {} // Gets a copy of this UnionFst. See Fst<>::Copy() for further doc. UnionFst<Arc> *Copy(bool safe = false) const override { return new UnionFst<Arc>(*this, safe); } private: using ImplToFst<internal::RationalFstImpl<Arc>>::GetImpl; using ImplToFst<internal::RationalFstImpl<Arc>>::GetMutableImpl; }; // Specialization for UnionFst. template <class Arc> class StateIterator<UnionFst<Arc>> : public StateIterator<RationalFst<Arc>> { public: explicit StateIterator(const UnionFst<Arc> &fst) : StateIterator<RationalFst<Arc>>(fst) {} }; // Specialization for UnionFst. template <class Arc> class ArcIterator<UnionFst<Arc>> : public ArcIterator<RationalFst<Arc>> { public: using StateId = typename Arc::StateId; ArcIterator(const UnionFst<Arc> &fst, StateId s) : ArcIterator<RationalFst<Arc>>(fst, s) {} }; using StdUnionFst = UnionFst<StdArc>; } // namespace fst #endif // FST_UNION_H_
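A short sketch of the two forms described above: the destructive Union() that splices the second FST into the first, and the delayed UnionFst that computes states on demand. The labels and weights are arbitrary illustration values.

```cpp
// Sketch: destructive Union() vs. the delayed UnionFst from the header above.
#include <fst/fstlib.h>

int main() {
  using fst::StdArc;
  using Weight = StdArc::Weight;

  // A: a one-arc acceptor for label 1.
  fst::StdVectorFst a;
  a.AddState();
  a.AddState();
  a.SetStart(0);
  a.SetFinal(1, Weight::One());
  a.AddArc(0, StdArc(1, 1, Weight::One(), 1));

  // B: a one-arc acceptor for label 2.
  fst::StdVectorFst b;
  b.AddState();
  b.AddState();
  b.SetStart(0);
  b.SetFinal(1, Weight::One());
  b.AddArc(0, StdArc(2, 2, Weight::One(), 1));

  // Delayed form: states and arcs are computed only when visited.
  fst::StdUnionFst lazy(a, b);

  // Destructive form: a is modified in place and now accepts label 1 or 2.
  fst::Union(&a, b);
  return 0;
}
```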
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/script/relabel.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/script/relabel.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void Relabel(MutableFstClass *ofst, const SymbolTable *old_isyms, const SymbolTable *relabel_isyms, const string &unknown_isymbol, bool attach_new_isyms, const SymbolTable *old_osyms, const SymbolTable *relabel_osyms, const string &unknown_osymbol, bool attach_new_osyms) { RelabelArgs1 args(ofst, old_isyms, relabel_isyms, unknown_isymbol, attach_new_isyms, old_osyms, relabel_osyms, unknown_osymbol, attach_new_osyms); Apply<Operation<RelabelArgs1>>("Relabel", ofst->ArcType(), &args); } void Relabel(MutableFstClass *ofst, const std::vector<LabelPair> &ipairs, const std::vector<LabelPair> &opairs) { RelabelArgs2 args(ofst, ipairs, opairs); Apply<Operation<RelabelArgs2>>("Relabel", ofst->ArcType(), &args); } REGISTER_FST_OPERATION(Relabel, StdArc, RelabelArgs1); REGISTER_FST_OPERATION(Relabel, LogArc, RelabelArgs1); REGISTER_FST_OPERATION(Relabel, Log64Arc, RelabelArgs1); REGISTER_FST_OPERATION(Relabel, StdArc, RelabelArgs2); REGISTER_FST_OPERATION(Relabel, LogArc, RelabelArgs2); REGISTER_FST_OPERATION(Relabel, Log64Arc, RelabelArgs2); } // namespace script } // namespace fst
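These script-level wrappers dispatch to the arc-templated Relabel in fst/relabel.h. A sketch of the label-pair form that the RelabelArgs2 overload ultimately invokes; the FST and label values are made up.

```cpp
// Sketch: relabeling arcs with explicit (old_label, new_label) pairs, the
// operation wrapped by the RelabelArgs2 overload above. Labels are illustrative.
#include <utility>
#include <vector>
#include <fst/fstlib.h>

int main() {
  using fst::StdArc;
  fst::StdVectorFst f;
  f.AddState();
  f.AddState();
  f.SetStart(0);
  f.SetFinal(1, StdArc::Weight::One());
  f.AddArc(0, StdArc(3, 7, StdArc::Weight::One(), 1));

  // Map input label 3 -> 5 and output label 7 -> 9 on every arc.
  const std::vector<std::pair<StdArc::Label, StdArc::Label>> ipairs = {{3, 5}};
  const std::vector<std::pair<StdArc::Label, StdArc::Label>> opairs = {{7, 9}};
  fst::Relabel(&f, ipairs, opairs);
  return 0;
}
```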
0
coqui_public_repos/STT/native_client/kenlm
coqui_public_repos/STT/native_client/kenlm/util/multi_intersection.hh
#ifndef UTIL_MULTI_INTERSECTION_H #define UTIL_MULTI_INTERSECTION_H #include <boost/optional.hpp> #include <boost/range/iterator_range.hpp> #include <algorithm> #include <functional> #include <vector> namespace util { namespace detail { template <class Range> struct RangeLessBySize : public std::binary_function<const Range &, const Range &, bool> { bool operator()(const Range &left, const Range &right) const { return left.size() < right.size(); } }; /* Takes sets specified by their iterators and a boost::optional containing * the lowest intersection if any. Each set must be sorted in increasing * order. sets is changed to truncate the beginning of each sequence to the * location of the match or an empty set. Precondition: sets is not empty * since the intersection over null is the universe and this function does not * know the universe. */ template <class Iterator, class Less> boost::optional<typename std::iterator_traits<Iterator>::value_type> FirstIntersectionSorted(std::vector<boost::iterator_range<Iterator> > &sets, const Less &less = std::less<typename std::iterator_traits<Iterator>::value_type>()) { typedef std::vector<boost::iterator_range<Iterator> > Sets; typedef typename std::iterator_traits<Iterator>::value_type Value; assert(!sets.empty()); if (sets.front().empty()) return boost::optional<Value>(); // Possibly suboptimal to copy for general Value; makes unsigned int go slightly faster. Value highest(sets.front().front()); for (typename Sets::iterator i(sets.begin()); i != sets.end(); ) { i->advance_begin(std::lower_bound(i->begin(), i->end(), highest, less) - i->begin()); if (i->empty()) return boost::optional<Value>(); if (less(highest, i->front())) { highest = i->front(); // start over i = sets.begin(); } else { ++i; } } return boost::optional<Value>(highest); } } // namespace detail template <class Iterator, class Less> boost::optional<typename std::iterator_traits<Iterator>::value_type> FirstIntersection(std::vector<boost::iterator_range<Iterator> > &sets, const Less less) { assert(!sets.empty()); std::sort(sets.begin(), sets.end(), detail::RangeLessBySize<boost::iterator_range<Iterator> >()); return detail::FirstIntersectionSorted(sets, less); } template <class Iterator> boost::optional<typename std::iterator_traits<Iterator>::value_type> FirstIntersection(std::vector<boost::iterator_range<Iterator> > &sets) { return FirstIntersection(sets, std::less<typename std::iterator_traits<Iterator>::value_type>()); } template <class Iterator, class Output, class Less> void AllIntersection(std::vector<boost::iterator_range<Iterator> > &sets, Output &out, const Less less) { typedef typename std::iterator_traits<Iterator>::value_type Value; assert(!sets.empty()); std::sort(sets.begin(), sets.end(), detail::RangeLessBySize<boost::iterator_range<Iterator> >()); boost::optional<Value> ret; for (boost::optional<Value> ret; (ret = detail::FirstIntersectionSorted(sets, less)); sets.front().advance_begin(1)) { out(*ret); } } template <class Iterator, class Output> void AllIntersection(std::vector<boost::iterator_range<Iterator> > &sets, Output &out) { AllIntersection(sets, out, std::less<typename std::iterator_traits<Iterator>::value_type>()); } } // namespace util #endif // UTIL_MULTI_INTERSECTION_H
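A sketch of driving the two helpers above with sorted vectors wrapped in boost::iterator_range. The include path and the container contents are assumptions for illustration; note that both helpers advance the ranges they are given, so each call below gets a freshly built vector of ranges.

```cpp
// Sketch: first/all intersection of sorted sequences using the helpers above.
// Requires Boost.Range; the values and the include path are illustrative.
#include <iostream>
#include <vector>
#include <boost/range/iterator_range.hpp>
#include "multi_intersection.hh"

typedef std::vector<unsigned>::const_iterator It;

static std::vector<boost::iterator_range<It> > MakeSets(
    const std::vector<unsigned> &a, const std::vector<unsigned> &b,
    const std::vector<unsigned> &c) {
  std::vector<boost::iterator_range<It> > sets;
  sets.push_back(boost::make_iterator_range(a.begin(), a.end()));
  sets.push_back(boost::make_iterator_range(b.begin(), b.end()));
  sets.push_back(boost::make_iterator_range(c.begin(), c.end()));
  return sets;
}

struct Print {
  void operator()(unsigned v) { std::cout << v << std::endl; }
};

int main() {
  const std::vector<unsigned> a = {1, 3, 5, 7, 9};
  const std::vector<unsigned> b = {2, 3, 7, 8};
  const std::vector<unsigned> c = {3, 4, 7};

  std::vector<boost::iterator_range<It> > sets = MakeSets(a, b, c);
  boost::optional<unsigned> first = util::FirstIntersection(sets);
  if (first) std::cout << "first common element: " << *first << std::endl;  // 3

  std::vector<boost::iterator_range<It> > sets2 = MakeSets(a, b, c);
  Print print;
  util::AllIntersection(sets2, print);  // prints 3 then 7
  return 0;
}
```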
0
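A usage sketch for the header above, compiled together with Boost.Range; the input vectors are invented for illustration. Each underlying sequence must already be sorted, and because the calls advance the ranges in place, a fresh set of ranges is built for the second call:

// Sketch: find the lowest and then all values common to three sorted arrays.
#include <iostream>
#include <vector>
#include <boost/range/iterator_range.hpp>
#include "util/multi_intersection.hh"

int main() {
  std::vector<unsigned> a = {1, 4, 7, 9};
  std::vector<unsigned> b = {4, 7, 10};
  std::vector<unsigned> c = {2, 4, 7};

  typedef std::vector<unsigned>::const_iterator It;
  std::vector<boost::iterator_range<It> > sets;
  sets.push_back(boost::make_iterator_range(a.cbegin(), a.cend()));
  sets.push_back(boost::make_iterator_range(b.cbegin(), b.cend()));
  sets.push_back(boost::make_iterator_range(c.cbegin(), c.cend()));

  boost::optional<unsigned> first = util::FirstIntersection(sets);
  if (first) std::cout << "lowest common value: " << *first << '\n';  // 4

  // AllIntersection takes an output functor; a simple printer suffices here.
  struct Print {
    void operator()(unsigned v) const { std::cout << v << '\n'; }
  } print;
  std::vector<boost::iterator_range<It> > sets2;
  sets2.push_back(boost::make_iterator_range(a.cbegin(), a.cend()));
  sets2.push_back(boost::make_iterator_range(b.cbegin(), b.cend()));
  sets2.push_back(boost::make_iterator_range(c.cbegin(), c.cend()));
  util::AllIntersection(sets2, print);  // prints 4 then 7
  return 0;
}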
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/algo_test.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Regression test for various FST algorithms. #ifndef FST_TEST_ALGO_TEST_H_ #define FST_TEST_ALGO_TEST_H_ #include <fst/log.h> #include <fst/fstlib.h> #include <fst/test/rand-fst.h> DECLARE_int32(repeat); // defined in ./algo_test.cc namespace fst { // Mapper to change input and output label of every transition into // epsilons. template <class A> class EpsMapper { public: EpsMapper() {} A operator()(const A &arc) const { return A(0, 0, arc.weight, arc.nextstate); } uint64 Properties(uint64 props) const { props &= ~kNotAcceptor; props |= kAcceptor; props &= ~kNoIEpsilons & ~kNoOEpsilons & ~kNoEpsilons; props |= kIEpsilons | kOEpsilons | kEpsilons; props &= ~kNotILabelSorted & ~kNotOLabelSorted; props |= kILabelSorted | kOLabelSorted; return props; } MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; } MapSymbolsAction InputSymbolsAction() const { return MAP_COPY_SYMBOLS; } MapSymbolsAction OutputSymbolsAction() const { return MAP_COPY_SYMBOLS; } }; // Generic - no lookahead. template <class Arc> void LookAheadCompose(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2, MutableFst<Arc> *ofst) { Compose(ifst1, ifst2, ofst); } // Specialized and epsilon olabel acyclic - lookahead. void LookAheadCompose(const Fst<StdArc> &ifst1, const Fst<StdArc> &ifst2, MutableFst<StdArc> *ofst) { std::vector<StdArc::StateId> order; bool acyclic; TopOrderVisitor<StdArc> visitor(&order, &acyclic); DfsVisit(ifst1, &visitor, OutputEpsilonArcFilter<StdArc>()); if (acyclic) { // no ifst1 output epsilon cycles? StdOLabelLookAheadFst lfst1(ifst1); StdVectorFst lfst2(ifst2); LabelLookAheadRelabeler<StdArc>::Relabel(&lfst2, lfst1, true); Compose(lfst1, lfst2, ofst); } else { Compose(ifst1, ifst2, ofst); } } // This class tests a variety of identities and properties that must // hold for various algorithms on weighted FSTs. 
template <class Arc, class WeightGenerator> class WeightedTester { public: typedef typename Arc::Label Label; typedef typename Arc::StateId StateId; typedef typename Arc::Weight Weight; WeightedTester(time_t seed, const Fst<Arc> &zero_fst, const Fst<Arc> &one_fst, const Fst<Arc> &univ_fst, WeightGenerator *weight_generator) : seed_(seed), zero_fst_(zero_fst), one_fst_(one_fst), univ_fst_(univ_fst), weight_generator_(weight_generator) {} void Test(const Fst<Arc> &T1, const Fst<Arc> &T2, const Fst<Arc> &T3) { TestRational(T1, T2, T3); TestMap(T1); TestCompose(T1, T2, T3); TestSort(T1); TestOptimize(T1); TestSearch(T1); } private: // Tests rational operations with identities void TestRational(const Fst<Arc> &T1, const Fst<Arc> &T2, const Fst<Arc> &T3) { { VLOG(1) << "Check destructive and delayed union are equivalent."; VectorFst<Arc> U1(T1); Union(&U1, T2); UnionFst<Arc> U2(T1, T2); CHECK(Equiv(U1, U2)); } { VLOG(1) << "Check destructive and delayed concatenation are equivalent."; VectorFst<Arc> C1(T1); Concat(&C1, T2); ConcatFst<Arc> C2(T1, T2); CHECK(Equiv(C1, C2)); VectorFst<Arc> C3(T2); Concat(T1, &C3); CHECK(Equiv(C3, C2)); } { VLOG(1) << "Check destructive and delayed closure* are equivalent."; VectorFst<Arc> C1(T1); Closure(&C1, CLOSURE_STAR); ClosureFst<Arc> C2(T1, CLOSURE_STAR); CHECK(Equiv(C1, C2)); } { VLOG(1) << "Check destructive and delayed closure+ are equivalent."; VectorFst<Arc> C1(T1); Closure(&C1, CLOSURE_PLUS); ClosureFst<Arc> C2(T1, CLOSURE_PLUS); CHECK(Equiv(C1, C2)); } { VLOG(1) << "Check union is associative (destructive)."; VectorFst<Arc> U1(T1); Union(&U1, T2); Union(&U1, T3); VectorFst<Arc> U3(T2); Union(&U3, T3); VectorFst<Arc> U4(T1); Union(&U4, U3); CHECK(Equiv(U1, U4)); } { VLOG(1) << "Check union is associative (delayed)."; UnionFst<Arc> U1(T1, T2); UnionFst<Arc> U2(U1, T3); UnionFst<Arc> U3(T2, T3); UnionFst<Arc> U4(T1, U3); CHECK(Equiv(U2, U4)); } { VLOG(1) << "Check union is associative (destructive delayed)."; UnionFst<Arc> U1(T1, T2); Union(&U1, T3); UnionFst<Arc> U3(T2, T3); UnionFst<Arc> U4(T1, U3); CHECK(Equiv(U1, U4)); } { VLOG(1) << "Check concatenation is associative (destructive)."; VectorFst<Arc> C1(T1); Concat(&C1, T2); Concat(&C1, T3); VectorFst<Arc> C3(T2); Concat(&C3, T3); VectorFst<Arc> C4(T1); Concat(&C4, C3); CHECK(Equiv(C1, C4)); } { VLOG(1) << "Check concatenation is associative (delayed)."; ConcatFst<Arc> C1(T1, T2); ConcatFst<Arc> C2(C1, T3); ConcatFst<Arc> C3(T2, T3); ConcatFst<Arc> C4(T1, C3); CHECK(Equiv(C2, C4)); } { VLOG(1) << "Check concatenation is associative (destructive delayed)."; ConcatFst<Arc> C1(T1, T2); Concat(&C1, T3); ConcatFst<Arc> C3(T2, T3); ConcatFst<Arc> C4(T1, C3); CHECK(Equiv(C1, C4)); } if (Weight::Properties() & kLeftSemiring) { VLOG(1) << "Check concatenation left distributes" << " over union (destructive)."; VectorFst<Arc> U1(T1); Union(&U1, T2); VectorFst<Arc> C1(T3); Concat(&C1, U1); VectorFst<Arc> C2(T3); Concat(&C2, T1); VectorFst<Arc> C3(T3); Concat(&C3, T2); VectorFst<Arc> U2(C2); Union(&U2, C3); CHECK(Equiv(C1, U2)); } if (Weight::Properties() & kRightSemiring) { VLOG(1) << "Check concatenation right distributes" << " over union (destructive)."; VectorFst<Arc> U1(T1); Union(&U1, T2); VectorFst<Arc> C1(U1); Concat(&C1, T3); VectorFst<Arc> C2(T1); Concat(&C2, T3); VectorFst<Arc> C3(T2); Concat(&C3, T3); VectorFst<Arc> U2(C2); Union(&U2, C3); CHECK(Equiv(C1, U2)); } if (Weight::Properties() & kLeftSemiring) { VLOG(1) << "Check concatenation left distributes over union (delayed)."; UnionFst<Arc> U1(T1, 
T2); ConcatFst<Arc> C1(T3, U1); ConcatFst<Arc> C2(T3, T1); ConcatFst<Arc> C3(T3, T2); UnionFst<Arc> U2(C2, C3); CHECK(Equiv(C1, U2)); } if (Weight::Properties() & kRightSemiring) { VLOG(1) << "Check concatenation right distributes over union (delayed)."; UnionFst<Arc> U1(T1, T2); ConcatFst<Arc> C1(U1, T3); ConcatFst<Arc> C2(T1, T3); ConcatFst<Arc> C3(T2, T3); UnionFst<Arc> U2(C2, C3); CHECK(Equiv(C1, U2)); } if (Weight::Properties() & kLeftSemiring) { VLOG(1) << "Check T T* == T+ (destructive)."; VectorFst<Arc> S(T1); Closure(&S, CLOSURE_STAR); VectorFst<Arc> C(T1); Concat(&C, S); VectorFst<Arc> P(T1); Closure(&P, CLOSURE_PLUS); CHECK(Equiv(C, P)); } if (Weight::Properties() & kRightSemiring) { VLOG(1) << "Check T* T == T+ (destructive)."; VectorFst<Arc> S(T1); Closure(&S, CLOSURE_STAR); VectorFst<Arc> C(S); Concat(&C, T1); VectorFst<Arc> P(T1); Closure(&P, CLOSURE_PLUS); CHECK(Equiv(C, P)); } if (Weight::Properties() & kLeftSemiring) { VLOG(1) << "Check T T* == T+ (delayed)."; ClosureFst<Arc> S(T1, CLOSURE_STAR); ConcatFst<Arc> C(T1, S); ClosureFst<Arc> P(T1, CLOSURE_PLUS); CHECK(Equiv(C, P)); } if (Weight::Properties() & kRightSemiring) { VLOG(1) << "Check T* T == T+ (delayed)."; ClosureFst<Arc> S(T1, CLOSURE_STAR); ConcatFst<Arc> C(S, T1); ClosureFst<Arc> P(T1, CLOSURE_PLUS); CHECK(Equiv(C, P)); } } // Tests map-based operations. void TestMap(const Fst<Arc> &T) { { VLOG(1) << "Check destructive and delayed projection are equivalent."; VectorFst<Arc> P1(T); Project(&P1, PROJECT_INPUT); ProjectFst<Arc> P2(T, PROJECT_INPUT); CHECK(Equiv(P1, P2)); } { VLOG(1) << "Check destructive and delayed inversion are equivalent."; VectorFst<Arc> I1(T); Invert(&I1); InvertFst<Arc> I2(T); CHECK(Equiv(I1, I2)); } { VLOG(1) << "Check Pi_1(T) = Pi_2(T^-1) (destructive)."; VectorFst<Arc> P1(T); VectorFst<Arc> I1(T); Project(&P1, PROJECT_INPUT); Invert(&I1); Project(&I1, PROJECT_OUTPUT); CHECK(Equiv(P1, I1)); } { VLOG(1) << "Check Pi_2(T) = Pi_1(T^-1) (destructive)."; VectorFst<Arc> P1(T); VectorFst<Arc> I1(T); Project(&P1, PROJECT_OUTPUT); Invert(&I1); Project(&I1, PROJECT_INPUT); CHECK(Equiv(P1, I1)); } { VLOG(1) << "Check Pi_1(T) = Pi_2(T^-1) (delayed)."; ProjectFst<Arc> P1(T, PROJECT_INPUT); InvertFst<Arc> I1(T); ProjectFst<Arc> P2(I1, PROJECT_OUTPUT); CHECK(Equiv(P1, P2)); } { VLOG(1) << "Check Pi_2(T) = Pi_1(T^-1) (delayed)."; ProjectFst<Arc> P1(T, PROJECT_OUTPUT); InvertFst<Arc> I1(T); ProjectFst<Arc> P2(I1, PROJECT_INPUT); CHECK(Equiv(P1, P2)); } { VLOG(1) << "Check destructive relabeling"; static const int kNumLabels = 10; // set up relabeling pairs std::vector<Label> labelset(kNumLabels); for (size_t i = 0; i < kNumLabels; ++i) labelset[i] = i; for (size_t i = 0; i < kNumLabels; ++i) { using std::swap; swap(labelset[i], labelset[rand() % kNumLabels]); } std::vector<std::pair<Label, Label>> ipairs1(kNumLabels); std::vector<std::pair<Label, Label>> opairs1(kNumLabels); for (size_t i = 0; i < kNumLabels; ++i) { ipairs1[i] = std::make_pair(i, labelset[i]); opairs1[i] = std::make_pair(labelset[i], i); } VectorFst<Arc> R(T); Relabel(&R, ipairs1, opairs1); std::vector<std::pair<Label, Label>> ipairs2(kNumLabels); std::vector<std::pair<Label, Label>> opairs2(kNumLabels); for (size_t i = 0; i < kNumLabels; ++i) { ipairs2[i] = std::make_pair(labelset[i], i); opairs2[i] = std::make_pair(i, labelset[i]); } Relabel(&R, ipairs2, opairs2); CHECK(Equiv(R, T)); VLOG(1) << "Check on-the-fly relabeling"; RelabelFst<Arc> Rdelay(T, ipairs1, opairs1); RelabelFst<Arc> RRdelay(Rdelay, ipairs2, opairs2); 
CHECK(Equiv(RRdelay, T)); } { VLOG(1) << "Check encoding/decoding (destructive)."; VectorFst<Arc> D(T); uint32 encode_props = 0; if (rand() % 2) encode_props |= kEncodeLabels; if (rand() % 2) encode_props |= kEncodeWeights; EncodeMapper<Arc> encoder(encode_props, ENCODE); Encode(&D, &encoder); Decode(&D, encoder); CHECK(Equiv(D, T)); } { VLOG(1) << "Check encoding/decoding (delayed)."; uint32 encode_props = 0; if (rand() % 2) encode_props |= kEncodeLabels; if (rand() % 2) encode_props |= kEncodeWeights; EncodeMapper<Arc> encoder(encode_props, ENCODE); EncodeFst<Arc> E(T, &encoder); VectorFst<Arc> Encoded(E); DecodeFst<Arc> D(Encoded, encoder); CHECK(Equiv(D, T)); } { VLOG(1) << "Check gallic mappers (constructive)."; ToGallicMapper<Arc> to_mapper; FromGallicMapper<Arc> from_mapper; VectorFst<GallicArc<Arc>> G; VectorFst<Arc> F; ArcMap(T, &G, to_mapper); ArcMap(G, &F, from_mapper); CHECK(Equiv(T, F)); } { VLOG(1) << "Check gallic mappers (delayed)."; ToGallicMapper<Arc> to_mapper; FromGallicMapper<Arc> from_mapper; ArcMapFst<Arc, GallicArc<Arc>, ToGallicMapper<Arc>> G(T, to_mapper); ArcMapFst<GallicArc<Arc>, Arc, FromGallicMapper<Arc>> F(G, from_mapper); CHECK(Equiv(T, F)); } } // Tests compose-based operations. void TestCompose(const Fst<Arc> &T1, const Fst<Arc> &T2, const Fst<Arc> &T3) { if (!(Weight::Properties() & kCommutative)) return; VectorFst<Arc> S1(T1); VectorFst<Arc> S2(T2); VectorFst<Arc> S3(T3); ILabelCompare<Arc> icomp; OLabelCompare<Arc> ocomp; ArcSort(&S1, ocomp); ArcSort(&S2, ocomp); ArcSort(&S3, icomp); { VLOG(1) << "Check composition is associative."; ComposeFst<Arc> C1(S1, S2); ComposeFst<Arc> C2(C1, S3); ComposeFst<Arc> C3(S2, S3); ComposeFst<Arc> C4(S1, C3); CHECK(Equiv(C2, C4)); } { VLOG(1) << "Check composition left distributes over union."; UnionFst<Arc> U1(S2, S3); ComposeFst<Arc> C1(S1, U1); ComposeFst<Arc> C2(S1, S2); ComposeFst<Arc> C3(S1, S3); UnionFst<Arc> U2(C2, C3); CHECK(Equiv(C1, U2)); } { VLOG(1) << "Check composition right distributes over union."; UnionFst<Arc> U1(S1, S2); ComposeFst<Arc> C1(U1, S3); ComposeFst<Arc> C2(S1, S3); ComposeFst<Arc> C3(S2, S3); UnionFst<Arc> U2(C2, C3); CHECK(Equiv(C1, U2)); } VectorFst<Arc> A1(S1); VectorFst<Arc> A2(S2); VectorFst<Arc> A3(S3); Project(&A1, PROJECT_OUTPUT); Project(&A2, PROJECT_INPUT); Project(&A3, PROJECT_INPUT); { VLOG(1) << "Check intersection is commutative."; IntersectFst<Arc> I1(A1, A2); IntersectFst<Arc> I2(A2, A1); CHECK(Equiv(I1, I2)); } { VLOG(1) << "Check all epsilon filters leads to equivalent results."; typedef Matcher<Fst<Arc>> M; ComposeFst<Arc> C1(S1, S2); ComposeFst<Arc> C2( S1, S2, ComposeFstOptions<Arc, M, AltSequenceComposeFilter<M>>()); ComposeFst<Arc> C3(S1, S2, ComposeFstOptions<Arc, M, MatchComposeFilter<M>>()); CHECK(Equiv(C1, C2)); CHECK(Equiv(C1, C3)); if ((Weight::Properties() & kIdempotent) || S1.Properties(kNoOEpsilons, false) || S2.Properties(kNoIEpsilons, false)) { ComposeFst<Arc> C4( S1, S2, ComposeFstOptions<Arc, M, TrivialComposeFilter<M>>()); CHECK(Equiv(C1, C4)); } if (S1.Properties(kNoOEpsilons, false) && S2.Properties(kNoIEpsilons, false)) { ComposeFst<Arc> C5(S1, S2, ComposeFstOptions<Arc, M, NullComposeFilter<M>>()); CHECK(Equiv(C1, C5)); } } { VLOG(1) << "Check look-ahead filters lead to equivalent results."; VectorFst<Arc> C1, C2; Compose(S1, S2, &C1); LookAheadCompose(S1, S2, &C2); CHECK(Equiv(C1, C2)); } } // Tests sorting operations void TestSort(const Fst<Arc> &T) { ILabelCompare<Arc> icomp; OLabelCompare<Arc> ocomp; { VLOG(1) << "Check arc sorted Fst is 
equivalent to its input."; VectorFst<Arc> S1(T); ArcSort(&S1, icomp); CHECK(Equiv(T, S1)); } { VLOG(1) << "Check destructive and delayed arcsort are equivalent."; VectorFst<Arc> S1(T); ArcSort(&S1, icomp); ArcSortFst<Arc, ILabelCompare<Arc>> S2(T, icomp); CHECK(Equiv(S1, S2)); } { VLOG(1) << "Check ilabel sorting vs. olabel sorting with inversions."; VectorFst<Arc> S1(T); VectorFst<Arc> S2(T); ArcSort(&S1, icomp); Invert(&S2); ArcSort(&S2, ocomp); Invert(&S2); CHECK(Equiv(S1, S2)); } { VLOG(1) << "Check topologically sorted Fst is equivalent to its input."; VectorFst<Arc> S1(T); TopSort(&S1); CHECK(Equiv(T, S1)); } { VLOG(1) << "Check reverse(reverse(T)) = T"; for (int i = 0; i < 2; ++i) { VectorFst<ReverseArc<Arc>> R1; VectorFst<Arc> R2; bool require_superinitial = i == 1; Reverse(T, &R1, require_superinitial); Reverse(R1, &R2, require_superinitial); CHECK(Equiv(T, R2)); } } } // Tests optimization operations void TestOptimize(const Fst<Arc> &T) { uint64 tprops = T.Properties(kFstProperties, true); uint64 wprops = Weight::Properties(); VectorFst<Arc> A(T); Project(&A, PROJECT_INPUT); { VLOG(1) << "Check connected FST is equivalent to its input."; VectorFst<Arc> C1(T); Connect(&C1); CHECK(Equiv(T, C1)); } if ((wprops & kSemiring) == kSemiring && (tprops & kAcyclic || wprops & kIdempotent)) { VLOG(1) << "Check epsilon-removed FST is equivalent to its input."; VectorFst<Arc> R1(T); RmEpsilon(&R1); CHECK(Equiv(T, R1)); VLOG(1) << "Check destructive and delayed epsilon removal" << "are equivalent."; RmEpsilonFst<Arc> R2(T); CHECK(Equiv(R1, R2)); VLOG(1) << "Check an FST with a large proportion" << " of epsilon transitions:"; // Maps all transitions of T to epsilon-transitions and append // a non-epsilon transition. VectorFst<Arc> U; ArcMap(T, &U, EpsMapper<Arc>()); VectorFst<Arc> V; V.SetStart(V.AddState()); Arc arc(1, 1, Weight::One(), V.AddState()); V.AddArc(V.Start(), arc); V.SetFinal(arc.nextstate, Weight::One()); Concat(&U, V); // Check that epsilon-removal preserves the shortest-distance // from the initial state to the final states. std::vector<Weight> d; ShortestDistance(U, &d, true); Weight w = U.Start() < d.size() ? d[U.Start()] : Weight::Zero(); VectorFst<Arc> U1(U); RmEpsilon(&U1); ShortestDistance(U1, &d, true); Weight w1 = U1.Start() < d.size() ? d[U1.Start()] : Weight::Zero(); CHECK(ApproxEqual(w, w1, kTestDelta)); RmEpsilonFst<Arc> U2(U); ShortestDistance(U2, &d, true); Weight w2 = U2.Start() < d.size() ? 
d[U2.Start()] : Weight::Zero(); CHECK(ApproxEqual(w, w2, kTestDelta)); } if ((wprops & kSemiring) == kSemiring && tprops & kAcyclic) { VLOG(1) << "Check determinized FSA is equivalent to its input."; DeterminizeFst<Arc> D(A); CHECK(Equiv(A, D)); { VLOG(1) << "Check determinized FST is equivalent to its input."; DeterminizeFstOptions<Arc> opts; opts.type = DETERMINIZE_NONFUNCTIONAL; DeterminizeFst<Arc> DT(T, opts); CHECK(Equiv(T, DT)); } if ((wprops & (kPath | kCommutative)) == (kPath | kCommutative)) { VLOG(1) << "Check pruning in determinization"; VectorFst<Arc> P; Weight threshold = (*weight_generator_)(); DeterminizeOptions<Arc> opts; opts.weight_threshold = threshold; Determinize(A, &P, opts); CHECK(P.Properties(kIDeterministic, true)); CHECK(PruneEquiv(A, P, threshold)); } if ((wprops & kPath) == kPath) { VLOG(1) << "Check min-determinization"; // Ensures no input epsilons VectorFst<Arc> R(T); std::vector<std::pair<Label, Label>> ipairs, opairs; ipairs.push_back(std::pair<Label, Label>(0, 1)); Relabel(&R, ipairs, opairs); VectorFst<Arc> M; DeterminizeOptions<Arc> opts; opts.type = DETERMINIZE_DISAMBIGUATE; Determinize(R, &M, opts); CHECK(M.Properties(kIDeterministic, true)); CHECK(MinRelated(M, R)); } int n; { VLOG(1) << "Check size(min(det(A))) <= size(det(A))" << " and min(det(A)) equiv det(A)"; VectorFst<Arc> M(D); n = M.NumStates(); Minimize(&M, static_cast<MutableFst<Arc> *>(nullptr), kDelta); CHECK(Equiv(D, M)); CHECK(M.NumStates() <= n); n = M.NumStates(); } if (n && (wprops & kIdempotent) == kIdempotent && A.Properties(kNoEpsilons, true)) { VLOG(1) << "Check that Revuz's algorithm leads to the" << " same number of states as Brozozowski's algorithm"; // Skip test if A is the empty machine or contains epsilons or // if the semiring is not idempotent (to avoid floating point // errors) VectorFst<Arc> R; Reverse(A, &R); RmEpsilon(&R); DeterminizeFst<Arc> DR(R); VectorFst<Arc> RD; Reverse(DR, &RD); DeterminizeFst<Arc> DRD(RD); VectorFst<Arc> M(DRD); CHECK_EQ(n + 1, M.NumStates()); // Accounts for the epsilon transition // to the initial state } } if ((wprops & kSemiring) == kSemiring && tprops & kAcyclic) { VLOG(1) << "Check disambiguated FSA is equivalent to its input."; VectorFst<Arc> R(A), D; RmEpsilon(&R); Disambiguate(R, &D); CHECK(Equiv(R, D)); VLOG(1) << "Check disambiguated FSA is unambiguous"; CHECK(Unambiguous(D)); /* TODO(riley): find out why this fails if ((wprops & (kPath | kCommutative)) == (kPath | kCommutative)) { VLOG(1) << "Check pruning in disambiguation"; VectorFst<Arc> P; Weight threshold = (*weight_generator_)(); DisambiguateOptions<Arc> opts; opts.weight_threshold = threshold; Disambiguate(R, &P, opts); CHECK(Unambiguous(P)); CHECK(PruneEquiv(A, P, threshold)); } */ } if (Arc::Type() == LogArc::Type() || Arc::Type() == StdArc::Type()) { VLOG(1) << "Check reweight(T) equiv T"; std::vector<Weight> potential; VectorFst<Arc> RI(T); VectorFst<Arc> RF(T); while (potential.size() < RI.NumStates()) potential.push_back((*weight_generator_)()); Reweight(&RI, potential, REWEIGHT_TO_INITIAL); CHECK(Equiv(T, RI)); Reweight(&RF, potential, REWEIGHT_TO_FINAL); CHECK(Equiv(T, RF)); } if ((wprops & kIdempotent) || (tprops & kAcyclic)) { VLOG(1) << "Check pushed FST is equivalent to input FST."; // Pushing towards the final state. 
if (wprops & kRightSemiring) { VectorFst<Arc> P1; Push<Arc, REWEIGHT_TO_FINAL>(T, &P1, kPushLabels); CHECK(Equiv(T, P1)); VectorFst<Arc> P2; Push<Arc, REWEIGHT_TO_FINAL>(T, &P2, kPushWeights); CHECK(Equiv(T, P2)); VectorFst<Arc> P3; Push<Arc, REWEIGHT_TO_FINAL>(T, &P3, kPushLabels | kPushWeights); CHECK(Equiv(T, P3)); } // Pushing towards the initial state. if (wprops & kLeftSemiring) { VectorFst<Arc> P1; Push<Arc, REWEIGHT_TO_INITIAL>(T, &P1, kPushLabels); CHECK(Equiv(T, P1)); VectorFst<Arc> P2; Push<Arc, REWEIGHT_TO_INITIAL>(T, &P2, kPushWeights); CHECK(Equiv(T, P2)); VectorFst<Arc> P3; Push<Arc, REWEIGHT_TO_INITIAL>(T, &P3, kPushLabels | kPushWeights); CHECK(Equiv(T, P3)); } } if ((wprops & (kPath | kCommutative)) == (kPath | kCommutative)) { VLOG(1) << "Check pruning algorithm"; { VLOG(1) << "Check equiv. of constructive and destructive algorithms"; Weight thresold = (*weight_generator_)(); VectorFst<Arc> P1(T); Prune(&P1, thresold); VectorFst<Arc> P2; Prune(T, &P2, thresold); CHECK(Equiv(P1, P2)); } { VLOG(1) << "Check prune(reverse) equiv reverse(prune)"; Weight thresold = (*weight_generator_)(); VectorFst<ReverseArc<Arc>> R; VectorFst<Arc> P1(T); VectorFst<Arc> P2; Prune(&P1, thresold); Reverse(T, &R); Prune(&R, thresold.Reverse()); Reverse(R, &P2); CHECK(Equiv(P1, P2)); } { VLOG(1) << "Check: ShortestDistance(A - prune(A))" << " > ShortestDistance(A) times Threshold"; Weight threshold = (*weight_generator_)(); VectorFst<Arc> P; Prune(A, &P, threshold); CHECK(PruneEquiv(A, P, threshold)); } } if (tprops & kAcyclic) { VLOG(1) << "Check synchronize(T) equiv T"; SynchronizeFst<Arc> S(T); CHECK(Equiv(T, S)); } } // Tests search operations void TestSearch(const Fst<Arc> &T) { uint64 wprops = Weight::Properties(); VectorFst<Arc> A(T); Project(&A, PROJECT_INPUT); if ((wprops & (kPath | kRightSemiring)) == (kPath | kRightSemiring)) { VLOG(1) << "Check 1-best weight."; VectorFst<Arc> path; ShortestPath(T, &path); Weight tsum = ShortestDistance(T); Weight psum = ShortestDistance(path); CHECK(ApproxEqual(tsum, psum, kTestDelta)); } if ((wprops & (kPath | kSemiring)) == (kPath | kSemiring)) { VLOG(1) << "Check n-best weights"; VectorFst<Arc> R(A); RmEpsilon(&R, /*connect=*/ true, Arc::Weight::Zero(), kNoStateId, kDelta); int nshortest = rand() % kNumRandomShortestPaths + 2; VectorFst<Arc> paths; ShortestPath(R, &paths, nshortest, /*unique=*/ true, /*first_path=*/ false, Weight::Zero(), kNumShortestStates, kDelta); std::vector<Weight> distance; ShortestDistance(paths, &distance, true, kDelta); StateId pstart = paths.Start(); if (pstart != kNoStateId) { ArcIterator<Fst<Arc>> piter(paths, pstart); for (; !piter.Done(); piter.Next()) { StateId s = piter.Value().nextstate; Weight nsum = s < distance.size() ? Times(piter.Value().weight, distance[s]) : Weight::Zero(); VectorFst<Arc> path; ShortestPath(R, &path, 1, false, false, Weight::Zero(), kNoStateId, kDelta); Weight dsum = ShortestDistance(path, kDelta); CHECK(ApproxEqual(nsum, dsum, kTestDelta)); ArcMap(&path, RmWeightMapper<Arc>()); VectorFst<Arc> S; Difference(R, path, &S); R = S; } } } } // Tests if two FSTS are equivalent by checking if random // strings from one FST are transduced the same by both FSTs. template <class A> bool Equiv(const Fst<A> &fst1, const Fst<A> &fst2) { VLOG(1) << "Check FSTs for sanity (including property bits)."; CHECK(Verify(fst1)); CHECK(Verify(fst2)); // Ensures seed used once per instantiation. 
static UniformArcSelector<A> uniform_selector(seed_); RandGenOptions<UniformArcSelector<A>> opts(uniform_selector, kRandomPathLength); return RandEquivalent(fst1, fst2, kNumRandomPaths, kTestDelta, opts); } // Tests FSA is unambiguous bool Unambiguous(const Fst<Arc> &fst) { VectorFst<StdArc> sfst, dfst; VectorFst<LogArc> lfst1, lfst2; Map(fst, &sfst, RmWeightMapper<Arc, StdArc>()); Determinize(sfst, &dfst); Map(fst, &lfst1, RmWeightMapper<Arc, LogArc>()); Map(dfst, &lfst2, RmWeightMapper<StdArc, LogArc>()); return Equiv(lfst1, lfst2); } // Ensures input-epsilon free transducers fst1 and fst2 have the // same domain and that for each string pair '(is, os)' in fst1, // '(is, os)' is the minimum weight match to 'is' in fst2. template <class A> bool MinRelated(const Fst<A> &fst1, const Fst<A> &fst2) { // Same domain VectorFst<Arc> P1(fst1), P2(fst2); Project(&P1, PROJECT_INPUT); Project(&P2, PROJECT_INPUT); if (!Equiv(P1, P2)) { LOG(ERROR) << "Inputs not equivalent"; return false; } // Ensures seed used once per instantiation. static UniformArcSelector<A> uniform_selector(seed_); RandGenOptions<UniformArcSelector<A>> opts(uniform_selector, kRandomPathLength); VectorFst<Arc> path, paths1, paths2; for (std::ptrdiff_t n = 0; n < kNumRandomPaths; ++n) { RandGen(fst1, &path, opts); Invert(&path); Map(&path, RmWeightMapper<Arc>()); Compose(path, fst2, &paths1); Weight sum1 = ShortestDistance(paths1); Compose(paths1, path, &paths2); Weight sum2 = ShortestDistance(paths2); if (!ApproxEqual(Plus(sum1, sum2), sum2, kTestDelta)) { LOG(ERROR) << "Sums not equivalent: " << sum1 << " " << sum2; return false; } } return true; } // Tests ShortestDistance(A - P) >= // ShortestDistance(A) times Threshold. template <class A> bool PruneEquiv(const Fst<A> &fst, const Fst<A> &pfst, Weight threshold) { VLOG(1) << "Check FSTs for sanity (including property bits)."; CHECK(Verify(fst)); CHECK(Verify(pfst)); DifferenceFst<Arc> D(fst, DeterminizeFst<Arc>(RmEpsilonFst<Arc>( ArcMapFst<Arc, Arc, RmWeightMapper<Arc>>( pfst, RmWeightMapper<Arc>())))); Weight sum1 = Times(ShortestDistance(fst), threshold); Weight sum2 = ShortestDistance(D); return ApproxEqual(Plus(sum1, sum2), sum1, kTestDelta); } // Random seed. int seed_; // FST with no states VectorFst<Arc> zero_fst_; // FST with one state that accepts epsilon. VectorFst<Arc> one_fst_; // FST with one state that accepts all strings. VectorFst<Arc> univ_fst_; // Generates weights used in testing. WeightGenerator *weight_generator_; // Maximum random path length. static const int kRandomPathLength; // Number of random paths to explore. static const int kNumRandomPaths; // Maximum number of nshortest paths. static const int kNumRandomShortestPaths; // Maximum number of nshortest states. static const int kNumShortestStates; // Delta for equivalence tests. 
static const float kTestDelta; WeightedTester(const WeightedTester &) = delete; WeightedTester &operator=(const WeightedTester &) = delete; }; template <class A, class WG> const int WeightedTester<A, WG>::kRandomPathLength = 25; template <class A, class WG> const int WeightedTester<A, WG>::kNumRandomPaths = 100; template <class A, class WG> const int WeightedTester<A, WG>::kNumRandomShortestPaths = 100; template <class A, class WG> const int WeightedTester<A, WG>::kNumShortestStates = 10000; template <class A, class WG> const float WeightedTester<A, WG>::kTestDelta = .05; // This class tests a variety of identities and properties that must // hold for various algorithms on unweighted FSAs and that are not tested // by WeightedTester. Only the specialization does anything interesting. template <class Arc> class UnweightedTester { public: UnweightedTester(const Fst<Arc> &zero_fsa, const Fst<Arc> &one_fsa, const Fst<Arc> &univ_fsa) {} void Test(const Fst<Arc> &A1, const Fst<Arc> &A2, const Fst<Arc> &A3) {} }; // Specialization for StdArc. This should work for any commutative, // idempotent semiring when restricted to the unweighted case // (being isomorphic to the boolean semiring). template <> class UnweightedTester<StdArc> { public: typedef StdArc Arc; typedef Arc::Label Label; typedef Arc::StateId StateId; typedef Arc::Weight Weight; UnweightedTester(const Fst<Arc> &zero_fsa, const Fst<Arc> &one_fsa, const Fst<Arc> &univ_fsa) : zero_fsa_(zero_fsa), one_fsa_(one_fsa), univ_fsa_(univ_fsa) {} void Test(const Fst<Arc> &A1, const Fst<Arc> &A2, const Fst<Arc> &A3) { TestRational(A1, A2, A3); TestIntersect(A1, A2, A3); TestOptimize(A1); } private: // Tests rational operations with identities void TestRational(const Fst<Arc> &A1, const Fst<Arc> &A2, const Fst<Arc> &A3) { { VLOG(1) << "Check the union contains its arguments (destructive)."; VectorFst<Arc> U(A1); Union(&U, A2); CHECK(Subset(A1, U)); CHECK(Subset(A2, U)); } { VLOG(1) << "Check the union contains its arguments (delayed)."; UnionFst<Arc> U(A1, A2); CHECK(Subset(A1, U)); CHECK(Subset(A2, U)); } { VLOG(1) << "Check if A^n c A* (destructive)."; VectorFst<Arc> C(one_fsa_); int n = rand() % 5; for (int i = 0; i < n; ++i) Concat(&C, A1); VectorFst<Arc> S(A1); Closure(&S, CLOSURE_STAR); CHECK(Subset(C, S)); } { VLOG(1) << "Check if A^n c A* (delayed)."; int n = rand() % 5; Fst<Arc> *C = new VectorFst<Arc>(one_fsa_); for (int i = 0; i < n; ++i) { ConcatFst<Arc> *F = new ConcatFst<Arc>(*C, A1); delete C; C = F; } ClosureFst<Arc> S(A1, CLOSURE_STAR); CHECK(Subset(*C, S)); delete C; } } // Tests intersect-based operations. 
void TestIntersect(const Fst<Arc> &A1, const Fst<Arc> &A2, const Fst<Arc> &A3) { VectorFst<Arc> S1(A1); VectorFst<Arc> S2(A2); VectorFst<Arc> S3(A3); ILabelCompare<Arc> comp; ArcSort(&S1, comp); ArcSort(&S2, comp); ArcSort(&S3, comp); { VLOG(1) << "Check the intersection is contained in its arguments."; IntersectFst<Arc> I1(S1, S2); CHECK(Subset(I1, S1)); CHECK(Subset(I1, S2)); } { VLOG(1) << "Check union distributes over intersection."; IntersectFst<Arc> I1(S1, S2); UnionFst<Arc> U1(I1, S3); UnionFst<Arc> U2(S1, S3); UnionFst<Arc> U3(S2, S3); ArcSortFst<Arc, ILabelCompare<Arc>> S4(U3, comp); IntersectFst<Arc> I2(U2, S4); CHECK(Equiv(U1, I2)); } VectorFst<Arc> C1; VectorFst<Arc> C2; Complement(S1, &C1); Complement(S2, &C2); ArcSort(&C1, comp); ArcSort(&C2, comp); { VLOG(1) << "Check S U S' = Sigma*"; UnionFst<Arc> U(S1, C1); CHECK(Equiv(U, univ_fsa_)); } { VLOG(1) << "Check S n S' = {}"; IntersectFst<Arc> I(S1, C1); CHECK(Equiv(I, zero_fsa_)); } { VLOG(1) << "Check (S1' U S2') == (S1 n S2)'"; UnionFst<Arc> U(C1, C2); IntersectFst<Arc> I(S1, S2); VectorFst<Arc> C3; Complement(I, &C3); CHECK(Equiv(U, C3)); } { VLOG(1) << "Check (S1' n S2') == (S1 U S2)'"; IntersectFst<Arc> I(C1, C2); UnionFst<Arc> U(S1, S2); VectorFst<Arc> C3; Complement(U, &C3); CHECK(Equiv(I, C3)); } } // Tests optimization operations void TestOptimize(const Fst<Arc> &A) { { VLOG(1) << "Check determinized FSA is equivalent to its input."; DeterminizeFst<Arc> D(A); CHECK(Equiv(A, D)); } { VLOG(1) << "Check disambiguated FSA is equivalent to its input."; VectorFst<Arc> R(A), D; RmEpsilon(&R); Disambiguate(R, &D); CHECK(Equiv(R, D)); } { VLOG(1) << "Check minimized FSA is equivalent to its input."; int n; { RmEpsilonFst<Arc> R(A); DeterminizeFst<Arc> D(R); VectorFst<Arc> M(D); Minimize(&M, static_cast<MutableFst<Arc> *>(nullptr), kDelta); CHECK(Equiv(A, M)); n = M.NumStates(); } if (n) { // Skip test if A is the empty machine VLOG(1) << "Check that Hopcroft's and Revuz's algorithms lead to the" << " same number of states as Brozozowski's algorithm"; VectorFst<Arc> R; Reverse(A, &R); RmEpsilon(&R); DeterminizeFst<Arc> DR(R); VectorFst<Arc> RD; Reverse(DR, &RD); DeterminizeFst<Arc> DRD(RD); VectorFst<Arc> M(DRD); CHECK_EQ(n + 1, M.NumStates()); // Accounts for the epsilon transition // to the initial state } } } // Tests if two FSAS are equivalent. bool Equiv(const Fst<Arc> &fsa1, const Fst<Arc> &fsa2) { VLOG(1) << "Check FSAs for sanity (including property bits)."; CHECK(Verify(fsa1)); CHECK(Verify(fsa2)); VectorFst<Arc> vfsa1(fsa1); VectorFst<Arc> vfsa2(fsa2); RmEpsilon(&vfsa1); RmEpsilon(&vfsa2); DeterminizeFst<Arc> dfa1(vfsa1); DeterminizeFst<Arc> dfa2(vfsa2); // Test equivalence using union-find algorithm bool equiv1 = Equivalent(dfa1, dfa2); // Test equivalence by checking if (S1 - S2) U (S2 - S1) is empty ILabelCompare<Arc> comp; VectorFst<Arc> sdfa1(dfa1); ArcSort(&sdfa1, comp); VectorFst<Arc> sdfa2(dfa2); ArcSort(&sdfa2, comp); DifferenceFst<Arc> dfsa1(sdfa1, sdfa2); DifferenceFst<Arc> dfsa2(sdfa2, sdfa1); VectorFst<Arc> ufsa(dfsa1); Union(&ufsa, dfsa2); Connect(&ufsa); bool equiv2 = ufsa.NumStates() == 0; // Check two equivalence tests match CHECK((equiv1 && equiv2) || (!equiv1 && !equiv2)); return equiv1; } // Tests if FSA1 is a subset of FSA2 (disregarding weights). bool Subset(const Fst<Arc> &fsa1, const Fst<Arc> &fsa2) { VLOG(1) << "Check FSAs (incl. 
property bits) for sanity"; CHECK(Verify(fsa1)); CHECK(Verify(fsa2)); VectorFst<StdArc> vfsa1; VectorFst<StdArc> vfsa2; RmEpsilon(&vfsa1); RmEpsilon(&vfsa2); ILabelCompare<StdArc> comp; ArcSort(&vfsa1, comp); ArcSort(&vfsa2, comp); IntersectFst<StdArc> ifsa(vfsa1, vfsa2); DeterminizeFst<StdArc> dfa1(vfsa1); DeterminizeFst<StdArc> dfa2(ifsa); return Equivalent(dfa1, dfa2); } // Returns complement Fsa void Complement(const Fst<Arc> &ifsa, MutableFst<Arc> *ofsa) { RmEpsilonFst<Arc> rfsa(ifsa); DeterminizeFst<Arc> dfa(rfsa); DifferenceFst<Arc> cfsa(univ_fsa_, dfa); *ofsa = cfsa; } // FSA with no states VectorFst<Arc> zero_fsa_; // FSA with one state that accepts epsilon. VectorFst<Arc> one_fsa_; // FSA with one state that accepts all strings. VectorFst<Arc> univ_fsa_; }; // This class tests a variety of identities and properties that must // hold for various FST algorithms. It randomly generates FSTs, using // function object 'weight_generator' to select weights. 'WeightTester' // and 'UnweightedTester' are then called. template <class Arc, class WeightGenerator> class AlgoTester { public: typedef typename Arc::Label Label; typedef typename Arc::StateId StateId; typedef typename Arc::Weight Weight; AlgoTester(WeightGenerator generator, int seed) : weight_generator_(generator) { one_fst_.AddState(); one_fst_.SetStart(0); one_fst_.SetFinal(0, Weight::One()); univ_fst_.AddState(); univ_fst_.SetStart(0); univ_fst_.SetFinal(0, Weight::One()); for (int i = 0; i < kNumRandomLabels; ++i) univ_fst_.AddArc(0, Arc(i, i, Weight::One(), 0)); weighted_tester_ = new WeightedTester<Arc, WeightGenerator>( seed, zero_fst_, one_fst_, univ_fst_, &weight_generator_); unweighted_tester_ = new UnweightedTester<Arc>(zero_fst_, one_fst_, univ_fst_); } ~AlgoTester() { delete weighted_tester_; delete unweighted_tester_; } void MakeRandFst(MutableFst<Arc> *fst) { RandFst<Arc, WeightGenerator>(kNumRandomStates, kNumRandomArcs, kNumRandomLabels, kAcyclicProb, &weight_generator_, fst); } void Test() { VLOG(1) << "weight type = " << Weight::Type(); for (int i = 0; i < FLAGS_repeat; ++i) { // Random transducers VectorFst<Arc> T1; VectorFst<Arc> T2; VectorFst<Arc> T3; MakeRandFst(&T1); MakeRandFst(&T2); MakeRandFst(&T3); weighted_tester_->Test(T1, T2, T3); VectorFst<Arc> A1(T1); VectorFst<Arc> A2(T2); VectorFst<Arc> A3(T3); Project(&A1, PROJECT_OUTPUT); Project(&A2, PROJECT_INPUT); Project(&A3, PROJECT_INPUT); ArcMap(&A1, rm_weight_mapper_); ArcMap(&A2, rm_weight_mapper_); ArcMap(&A3, rm_weight_mapper_); unweighted_tester_->Test(A1, A2, A3); } } private: // Generates weights used in testing. WeightGenerator weight_generator_; // FST with no states VectorFst<Arc> zero_fst_; // FST with one state that accepts epsilon. VectorFst<Arc> one_fst_; // FST with one state that accepts all strings. VectorFst<Arc> univ_fst_; // Tests weighted FSTs WeightedTester<Arc, WeightGenerator> *weighted_tester_; // Tests unweighted FSTs UnweightedTester<Arc> *unweighted_tester_; // Mapper to remove weights from an Fst RmWeightMapper<Arc> rm_weight_mapper_; // Maximum number of states in random test Fst. static const int kNumRandomStates; // Maximum number of arcs in random test Fst. static const int kNumRandomArcs; // Number of alternative random labels. static const int kNumRandomLabels; // Probability to force an acyclic Fst static const float kAcyclicProb; // Maximum random path length. static const int kRandomPathLength; // Number of random paths to explore. 
static const int kNumRandomPaths; AlgoTester(const AlgoTester &) = delete; AlgoTester &operator=(const AlgoTester &) = delete; }; template <class A, class G> const int AlgoTester<A, G>::kNumRandomStates = 10; template <class A, class G> const int AlgoTester<A, G>::kNumRandomArcs = 25; template <class A, class G> const int AlgoTester<A, G>::kNumRandomLabels = 5; template <class A, class G> const float AlgoTester<A, G>::kAcyclicProb = .25; template <class A, class G> const int AlgoTester<A, G>::kRandomPathLength = 25; template <class A, class G> const int AlgoTester<A, G>::kNumRandomPaths = 100; } // namespace fst #endif // FST_TEST_ALGO_TEST_H_
0
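The Equiv helper in the tester above compares FSTs by drawing random paths with a UniformArcSelector and calling RandEquivalent. A stand-alone sketch of one identity it exercises (destructive versus delayed union), with tiny hand-built FSTs and parameter values chosen only for the example:

// Sketch: check that Union(&U1, B) and UnionFst(A, B) accept the same
// weighted paths, using the same random-path comparison as the tester.
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst A, B;
  const auto a0 = A.AddState();
  const auto a1 = A.AddState();
  A.SetStart(a0);
  A.SetFinal(a1, fst::TropicalWeight::One());
  A.AddArc(a0, fst::StdArc(1, 1, fst::TropicalWeight::One(), a1));

  const auto b0 = B.AddState();
  const auto b1 = B.AddState();
  B.SetStart(b0);
  B.SetFinal(b1, fst::TropicalWeight::One());
  B.AddArc(b0, fst::StdArc(2, 2, fst::TropicalWeight::One(), b1));

  fst::StdVectorFst U1(A);
  fst::Union(&U1, B);                   // destructive union
  fst::UnionFst<fst::StdArc> U2(A, B);  // delayed union

  fst::UniformArcSelector<fst::StdArc> selector(/*seed=*/1);
  fst::RandGenOptions<fst::UniformArcSelector<fst::StdArc>> opts(
      selector, /*max_length=*/25);
  const bool equiv =
      fst::RandEquivalent(U1, U2, /*num_paths=*/100, /*delta=*/0.05, opts);
  return equiv ? 0 : 1;
}

The same pattern extends to the concatenation, closure, and composition identities checked by the tester.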
coqui_public_repos/TTS/TTS/tts/layers
coqui_public_repos/TTS/TTS/tts/layers/tacotron/tacotron.py
# coding: utf-8 # adapted from https://github.com/r9y9/tacotron_pytorch import torch from torch import nn from .attentions import init_attn from .common_layers import Prenet class BatchNormConv1d(nn.Module): r"""A wrapper for Conv1d with BatchNorm. It sets the activation function between Conv and BatchNorm layers. BatchNorm layer is initialized with the TF default values for momentum and eps. Args: in_channels: size of each input sample out_channels: size of each output samples kernel_size: kernel size of conv filters stride: stride of conv filters padding: padding of conv filters activation: activation function set b/w Conv1d and BatchNorm Shapes: - input: (B, D) - output: (B, D) """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activation=None): super().__init__() self.padding = padding self.padder = nn.ConstantPad1d(padding, 0) self.conv1d = nn.Conv1d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=0, bias=False ) # Following tensorflow's default parameters self.bn = nn.BatchNorm1d(out_channels, momentum=0.99, eps=1e-3) self.activation = activation # self.init_layers() def init_layers(self): if isinstance(self.activation, torch.nn.ReLU): w_gain = "relu" elif isinstance(self.activation, torch.nn.Tanh): w_gain = "tanh" elif self.activation is None: w_gain = "linear" else: raise RuntimeError("Unknown activation function") torch.nn.init.xavier_uniform_(self.conv1d.weight, gain=torch.nn.init.calculate_gain(w_gain)) def forward(self, x): x = self.padder(x) x = self.conv1d(x) x = self.bn(x) if self.activation is not None: x = self.activation(x) return x class Highway(nn.Module): r"""Highway layers as explained in https://arxiv.org/abs/1505.00387 Args: in_features (int): size of each input sample out_feature (int): size of each output sample Shapes: - input: (B, *, H_in) - output: (B, *, H_out) """ # TODO: Try GLU layer def __init__(self, in_features, out_feature): super().__init__() self.H = nn.Linear(in_features, out_feature) self.H.bias.data.zero_() self.T = nn.Linear(in_features, out_feature) self.T.bias.data.fill_(-1) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() # self.init_layers() def init_layers(self): torch.nn.init.xavier_uniform_(self.H.weight, gain=torch.nn.init.calculate_gain("relu")) torch.nn.init.xavier_uniform_(self.T.weight, gain=torch.nn.init.calculate_gain("sigmoid")) def forward(self, inputs): H = self.relu(self.H(inputs)) T = self.sigmoid(self.T(inputs)) return H * T + inputs * (1.0 - T) class CBHG(nn.Module): """CBHG module: a recurrent neural network composed of: - 1-d convolution banks - Highway networks + residual connections - Bidirectional gated recurrent units Args: in_features (int): sample size K (int): max filter size in conv bank projections (list): conv channel sizes for conv projections num_highways (int): number of highways layers Shapes: - input: (B, C, T_in) - output: (B, T_in, C*2) """ # pylint: disable=dangerous-default-value def __init__( self, in_features, K=16, conv_bank_features=128, conv_projections=[128, 128], highway_features=128, gru_features=128, num_highways=4, ): super().__init__() self.in_features = in_features self.conv_bank_features = conv_bank_features self.highway_features = highway_features self.gru_features = gru_features self.conv_projections = conv_projections self.relu = nn.ReLU() # list of conv1d bank with filter size k=1...K # TODO: try dilational layers instead self.conv1d_banks = nn.ModuleList( [ BatchNormConv1d( in_features, conv_bank_features, kernel_size=k, stride=1, 
padding=[(k - 1) // 2, k // 2], activation=self.relu, ) for k in range(1, K + 1) ] ) # max pooling of conv bank, with padding # TODO: try average pooling OR larger kernel size out_features = [K * conv_bank_features] + conv_projections[:-1] activations = [self.relu] * (len(conv_projections) - 1) activations += [None] # setup conv1d projection layers layer_set = [] for in_size, out_size, ac in zip(out_features, conv_projections, activations): layer = BatchNormConv1d(in_size, out_size, kernel_size=3, stride=1, padding=[1, 1], activation=ac) layer_set.append(layer) self.conv1d_projections = nn.ModuleList(layer_set) # setup Highway layers if self.highway_features != conv_projections[-1]: self.pre_highway = nn.Linear(conv_projections[-1], highway_features, bias=False) self.highways = nn.ModuleList([Highway(highway_features, highway_features) for _ in range(num_highways)]) # bi-directional GPU layer self.gru = nn.GRU(gru_features, gru_features, 1, batch_first=True, bidirectional=True) def forward(self, inputs): # (B, in_features, T_in) x = inputs # (B, hid_features*K, T_in) # Concat conv1d bank outputs outs = [] for conv1d in self.conv1d_banks: out = conv1d(x) outs.append(out) x = torch.cat(outs, dim=1) assert x.size(1) == self.conv_bank_features * len(self.conv1d_banks) for conv1d in self.conv1d_projections: x = conv1d(x) x += inputs x = x.transpose(1, 2) if self.highway_features != self.conv_projections[-1]: x = self.pre_highway(x) # Residual connection # TODO: try residual scaling as in Deep Voice 3 # TODO: try plain residual layers for highway in self.highways: x = highway(x) # (B, T_in, hid_features*2) # TODO: replace GRU with convolution as in Deep Voice 3 self.gru.flatten_parameters() outputs, _ = self.gru(x) return outputs class EncoderCBHG(nn.Module): r"""CBHG module with Encoder specific arguments""" def __init__(self): super().__init__() self.cbhg = CBHG( 128, K=16, conv_bank_features=128, conv_projections=[128, 128], highway_features=128, gru_features=128, num_highways=4, ) def forward(self, x): return self.cbhg(x) class Encoder(nn.Module): r"""Stack Prenet and CBHG module for encoder Args: inputs (FloatTensor): embedding features Shapes: - inputs: (B, T, D_in) - outputs: (B, T, 128 * 2) """ def __init__(self, in_features): super().__init__() self.prenet = Prenet(in_features, out_features=[256, 128]) self.cbhg = EncoderCBHG() def forward(self, inputs): # B x T x prenet_dim outputs = self.prenet(inputs) outputs = self.cbhg(outputs.transpose(1, 2)) return outputs class PostCBHG(nn.Module): def __init__(self, mel_dim): super().__init__() self.cbhg = CBHG( mel_dim, K=8, conv_bank_features=128, conv_projections=[256, mel_dim], highway_features=128, gru_features=128, num_highways=4, ) def forward(self, x): return self.cbhg(x) class Decoder(nn.Module): """Tacotron decoder. Args: in_channels (int): number of input channels. frame_channels (int): number of feature frame channels. r (int): number of outputs per time step (reduction rate). memory_size (int): size of the past window. if <= 0 memory_size = r attn_type (string): type of attention used in decoder. attn_windowing (bool): if true, define an attention window centered to maximum attention response. It provides more robust attention alignment especially at interence time. attn_norm (string): attention normalization function. 'sigmoid' or 'softmax'. prenet_type (string): 'original' or 'bn'. prenet_dropout (float): prenet dropout rate. forward_attn (bool): if true, use forward attention method. 
https://arxiv.org/abs/1807.06736 trans_agent (bool): if true, use transition agent. https://arxiv.org/abs/1807.06736 forward_attn_mask (bool): if true, mask attention values smaller than a threshold. location_attn (bool): if true, use location sensitive attention. attn_K (int): number of attention heads for GravesAttention. separate_stopnet (bool): if true, detach stopnet input to prevent gradient flow. d_vector_dim (int): size of speaker embedding vector, for multi-speaker training. max_decoder_steps (int): Maximum number of steps allowed for the decoder. Defaults to 500. """ # Pylint gets confused by PyTorch conventions here # pylint: disable=attribute-defined-outside-init def __init__( self, in_channels, frame_channels, r, memory_size, attn_type, attn_windowing, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet, max_decoder_steps, ): super().__init__() self.r_init = r self.r = r self.in_channels = in_channels self.max_decoder_steps = max_decoder_steps self.use_memory_queue = memory_size > 0 self.memory_size = memory_size if memory_size > 0 else r self.frame_channels = frame_channels self.separate_stopnet = separate_stopnet self.query_dim = 256 # memory -> |Prenet| -> processed_memory prenet_dim = frame_channels * self.memory_size if self.use_memory_queue else frame_channels self.prenet = Prenet(prenet_dim, prenet_type, prenet_dropout, out_features=[256, 128]) # processed_inputs, processed_memory -> |Attention| -> Attention, attention, RNN_State # attention_rnn generates queries for the attention mechanism self.attention_rnn = nn.GRUCell(in_channels + 128, self.query_dim) self.attention = init_attn( attn_type=attn_type, query_dim=self.query_dim, embedding_dim=in_channels, attention_dim=128, location_attention=location_attn, attention_location_n_filters=32, attention_location_kernel_size=31, windowing=attn_windowing, norm=attn_norm, forward_attn=forward_attn, trans_agent=trans_agent, forward_attn_mask=forward_attn_mask, attn_K=attn_K, ) # (processed_memory | attention context) -> |Linear| -> decoder_RNN_input self.project_to_decoder_in = nn.Linear(256 + in_channels, 256) # decoder_RNN_input -> |RNN| -> RNN_state self.decoder_rnns = nn.ModuleList([nn.GRUCell(256, 256) for _ in range(2)]) # RNN_state -> |Linear| -> mel_spec self.proj_to_mel = nn.Linear(256, frame_channels * self.r_init) # learn init values instead of zero init. 
self.stopnet = StopNet(256 + frame_channels * self.r_init) def set_r(self, new_r): self.r = new_r def _reshape_memory(self, memory): """ Reshape the spectrograms for given 'r' """ # Grouping multiple frames if necessary if memory.size(-1) == self.frame_channels: memory = memory.view(memory.shape[0], memory.size(1) // self.r, -1) # Time first (T_decoder, B, frame_channels) memory = memory.transpose(0, 1) return memory def _init_states(self, inputs): """ Initialization of decoder states """ B = inputs.size(0) # go frame as zeros matrix if self.use_memory_queue: self.memory_input = torch.zeros(1, device=inputs.device).repeat(B, self.frame_channels * self.memory_size) else: self.memory_input = torch.zeros(1, device=inputs.device).repeat(B, self.frame_channels) # decoder states self.attention_rnn_hidden = torch.zeros(1, device=inputs.device).repeat(B, 256) self.decoder_rnn_hiddens = [ torch.zeros(1, device=inputs.device).repeat(B, 256) for idx in range(len(self.decoder_rnns)) ] self.context_vec = inputs.data.new(B, self.in_channels).zero_() # cache attention inputs self.processed_inputs = self.attention.preprocess_inputs(inputs) def _parse_outputs(self, outputs, attentions, stop_tokens): # Back to batch first attentions = torch.stack(attentions).transpose(0, 1) stop_tokens = torch.stack(stop_tokens).transpose(0, 1) outputs = torch.stack(outputs).transpose(0, 1).contiguous() outputs = outputs.view(outputs.size(0), -1, self.frame_channels) outputs = outputs.transpose(1, 2) return outputs, attentions, stop_tokens def decode(self, inputs, mask=None): # Prenet processed_memory = self.prenet(self.memory_input) # Attention RNN self.attention_rnn_hidden = self.attention_rnn( torch.cat((processed_memory, self.context_vec), -1), self.attention_rnn_hidden ) self.context_vec = self.attention(self.attention_rnn_hidden, inputs, self.processed_inputs, mask) # Concat RNN output and attention context vector decoder_input = self.project_to_decoder_in(torch.cat((self.attention_rnn_hidden, self.context_vec), -1)) # Pass through the decoder RNNs for idx, decoder_rnn in enumerate(self.decoder_rnns): self.decoder_rnn_hiddens[idx] = decoder_rnn(decoder_input, self.decoder_rnn_hiddens[idx]) # Residual connection decoder_input = self.decoder_rnn_hiddens[idx] + decoder_input decoder_output = decoder_input # predict mel vectors from decoder vectors output = self.proj_to_mel(decoder_output) # output = torch.sigmoid(output) # predict stop token stopnet_input = torch.cat([decoder_output, output], -1) if self.separate_stopnet: stop_token = self.stopnet(stopnet_input.detach()) else: stop_token = self.stopnet(stopnet_input) output = output[:, : self.r * self.frame_channels] return output, stop_token, self.attention.attention_weights def _update_memory_input(self, new_memory): if self.use_memory_queue: if self.memory_size > self.r: # memory queue size is larger than number of frames per decoder iter self.memory_input = torch.cat( [new_memory, self.memory_input[:, : (self.memory_size - self.r) * self.frame_channels].clone()], dim=-1, ) else: # memory queue size smaller than number of frames per decoder iter self.memory_input = new_memory[:, : self.memory_size * self.frame_channels] else: # use only the last frame prediction # assert new_memory.shape[-1] == self.r * self.frame_channels self.memory_input = new_memory[:, self.frame_channels * (self.r - 1) :] def forward(self, inputs, memory, mask): """ Args: inputs: Encoder outputs. memory: Decoder memory (autoregression. 
If None (at eval-time), decoder outputs are used as decoder inputs. If None, it uses the last output as the input. mask: Attention mask for sequence padding. Shapes: - inputs: (B, T, D_out_enc) - memory: (B, T_mel, D_mel) """ # Run greedy decoding if memory is None memory = self._reshape_memory(memory) outputs = [] attentions = [] stop_tokens = [] t = 0 self._init_states(inputs) self.attention.init_states(inputs) while len(outputs) < memory.size(0): if t > 0: new_memory = memory[t - 1] self._update_memory_input(new_memory) output, stop_token, attention = self.decode(inputs, mask) outputs += [output] attentions += [attention] stop_tokens += [stop_token.squeeze(1)] t += 1 return self._parse_outputs(outputs, attentions, stop_tokens) def inference(self, inputs): """ Args: inputs: encoder outputs. Shapes: - inputs: batch x time x encoder_out_dim """ outputs = [] attentions = [] stop_tokens = [] t = 0 self._init_states(inputs) self.attention.init_states(inputs) while True: if t > 0: new_memory = outputs[-1] self._update_memory_input(new_memory) output, stop_token, attention = self.decode(inputs, None) stop_token = torch.sigmoid(stop_token.data) outputs += [output] attentions += [attention] stop_tokens += [stop_token] t += 1 if t > inputs.shape[1] / 4 and (stop_token > 0.6 or attention[:, -1].item() > 0.6): break if t > self.max_decoder_steps: print(" | > Decoder stopped with 'max_decoder_steps") break return self._parse_outputs(outputs, attentions, stop_tokens) class StopNet(nn.Module): r"""Stopnet signalling decoder to stop inference. Args: in_features (int): feature dimension of input. """ def __init__(self, in_features): super().__init__() self.dropout = nn.Dropout(0.1) self.linear = nn.Linear(in_features, 1) torch.nn.init.xavier_uniform_(self.linear.weight, gain=torch.nn.init.calculate_gain("linear")) def forward(self, inputs): outputs = self.dropout(inputs) outputs = self.linear(outputs) return outputs
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/mapped-file.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_MAPPED_FILE_H_
#define FST_MAPPED_FILE_H_

#include <cstddef>
#include <istream>
#include <string>

#include <fst/compat.h>
#include <fst/flags.h>

namespace fst {

// A memory region is a simple abstraction for allocated memory or data from
// memory-mapped files. If mmap is null, then data represents an owned region
// of size bytes. Otherwise, mmap and size refer to the mapping and data is a
// casted pointer to a region contained within [mmap, mmap + size). If size is
// 0, then mmap and data refer to a block of memory managed externally by some
// other allocator. The offset is used when allocating memory to provide
// padding for alignment.
struct MemoryRegion {
  void *data;
  void *mmap;
  size_t size;
  int offset;
};

class MappedFile {
 public:
  ~MappedFile();

  void *mutable_data() const { return region_.data; }

  const void *data() const { return region_.data; }

  // Returns a MappedFile object that contains the contents of the input stream
  // strm starting from the current file position with size bytes. The
  // memorymap bool is advisory, and Map will default to allocating and
  // reading. The source argument needs to contain the filename that was used
  // to open the input stream.
  static MappedFile *Map(std::istream *istrm, bool memorymap,
                         const string &source, size_t size);

  // Creates a MappedFile object with a new[]'ed block of memory of size. The
  // align argument can be used to specify a desired block alignment.
  // This is RECOMMENDED FOR INTERNAL USE ONLY as it may change in future
  // releases.
  static MappedFile *Allocate(size_t size, int align = kArchAlignment);

  // Creates a MappedFile object pointing to a borrowed reference to data. This
  // block of memory is not owned by the MappedFile object and will not be
  // freed. This is RECOMMENDED FOR INTERNAL USE ONLY, may change in future
  // releases.
  static MappedFile *Borrow(void *data);

  // Alignment required for mapping structures in bytes. Regions of memory that
  // are not aligned upon a 128-bit boundary are read from the file instead.
  // This is consistent with the alignment boundary set in ConstFst and
  // CompactFst.
  static constexpr int kArchAlignment = 16;

  static constexpr size_t kMaxReadChunk = 256 * 1024 * 1024;  // 256 MB.

 private:
  explicit MappedFile(const MemoryRegion &region);

  MemoryRegion region_;

  MappedFile(const MappedFile &) = delete;
  MappedFile &operator=(const MappedFile &) = delete;
};

}  // namespace fst

#endif  // FST_MAPPED_FILE_H_
0
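A small sketch of the allocation side of the class above; the payload and sizes are made up, and per the header's own comments Allocate and Borrow are flagged for internal use, so this is illustrative only:

// Sketch: allocate an aligned region through MappedFile, copy bytes into it,
// and read them back. The MappedFile owns (and frees) the allocated block;
// a borrowed region is wrapped but never freed by the MappedFile.
#include <cstring>
#include <iostream>
#include <memory>
#include <fst/mapped-file.h>

int main() {
  const char payload[] = "example bytes";
  std::unique_ptr<fst::MappedFile> region(
      fst::MappedFile::Allocate(sizeof(payload)));
  std::memcpy(region->mutable_data(), payload, sizeof(payload));
  std::cout << static_cast<const char *>(region->data()) << std::endl;

  char external[16] = "borrowed";
  std::unique_ptr<fst::MappedFile> view(fst::MappedFile::Borrow(external));
  std::cout << static_cast<const char *>(view->data()) << std::endl;
  return 0;
}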
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/disambiguate.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions and classes to disambiguate an FST. #ifndef FST_DISAMBIGUATE_H_ #define FST_DISAMBIGUATE_H_ #include <list> #include <map> #include <set> #include <utility> #include <vector> #include <fst/arcsort.h> #include <fst/compose.h> #include <fst/connect.h> #include <fst/determinize.h> #include <fst/dfs-visit.h> #include <fst/project.h> #include <fst/prune.h> #include <fst/state-map.h> #include <fst/state-table.h> #include <fst/union-find.h> #include <fst/verify.h> namespace fst { template <class Arc> struct DisambiguateOptions : public DeterminizeOptions<Arc> { using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; explicit DisambiguateOptions(float delta = kDelta, Weight weight = Weight::Zero(), StateId n = kNoStateId, Label label = 0) : DeterminizeOptions<Arc>(delta, std::move(weight), n, label, DETERMINIZE_FUNCTIONAL) {} }; namespace internal { // A determinization filter based on a subset element relation. The relation is // assumed to be reflexive and symmetric. template <class Arc, class Relation> class RelationDeterminizeFilter { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using FilterState = IntegerFilterState<StateId>; using StateTuple = DeterminizeStateTuple<Arc, FilterState>; using Subset = typename StateTuple::Subset; using Element = typename StateTuple::Element; using LabelMap = std::multimap<Label, DeterminizeArc<StateTuple>>; // This is needed (e.g.) to go into the gallic domain for transducers; there // is no need to rebind the relation since its use here only depends on the // state IDs. template <class A> struct rebind { using Other = RelationDeterminizeFilter<A, Relation>; }; explicit RelationDeterminizeFilter(const Fst<Arc> &fst) : fst_(fst.Copy()), r_(new Relation()), s_(kNoStateId), head_(nullptr) {} // Ownership of the relation is given to this class. RelationDeterminizeFilter(const Fst<Arc> &fst, Relation *r) : fst_(fst.Copy()), r_(r), s_(kNoStateId), head_(0) {} // Ownership of the relation is given to this class. RelationDeterminizeFilter(const Fst<Arc> &fst, Relation *r, std::vector<StateId> *head) : fst_(fst.Copy()), r_(r), s_(kNoStateId), head_(head) {} // This is needed, e.g., to go into the gallic domain for transducers. // Ownership of the templated filter argument is given to this class. template <class Filter> RelationDeterminizeFilter(const Fst<Arc> &fst, Filter *filter) : fst_(fst.Copy()), r_(new Relation(filter->GetRelation())), s_(kNoStateId), head_(filter->GetHeadStates()) { delete filter; } // Copy constructor; the FST can be passed if it has been deep-copied. RelationDeterminizeFilter(const RelationDeterminizeFilter &filter, const Fst<Arc> *fst = nullptr) : fst_(fst ? fst->Copy() : filter.fst_->Copy()), r_(new Relation(*filter.r_)), s_(kNoStateId), head_() {} FilterState Start() const { return FilterState(fst_->Start()); } void SetState(StateId s, const StateTuple &tuple) { if (s_ != s) { s_ = s; tuple_ = &tuple; const auto head = tuple.filter_state.GetState(); is_final_ = fst_->Final(head) != Weight::Zero(); if (head_) { if (head_->size() <= s) head_->resize(s + 1, kNoStateId); (*head_)[s] = head; } } } // Filters transition, possibly modifying label map. Returns true if arc is // added to label map. 
bool FilterArc(const Arc &arc, const Element &src_element, const Element &dest_element, LabelMap *label_map) const; // Filters super-final transition, returning new final weight. Weight FilterFinal(const Weight final_weight, const Element &element) const { return is_final_ ? final_weight : Weight::Zero(); } static uint64_t Properties(uint64_t props) { return props & ~(kIDeterministic | kODeterministic); } const Relation &GetRelation() { return *r_; } std::vector<StateId> *GetHeadStates() { return head_; } private: // Pairs arc labels with state tuples with possible heads and empty subsets. void InitLabelMap(LabelMap *label_map) const; std::unique_ptr<Fst<Arc>> fst_; // Input FST. std::unique_ptr<Relation> r_; // Relation compatible with inv. trans. fnc. StateId s_; // Current state. const StateTuple *tuple_; // Current tuple. bool is_final_; // Is the current head state final? std::vector<StateId> *head_; // Head state for a given state, // owned by the Disambiguator. }; template <class Arc, class Relation> bool RelationDeterminizeFilter<Arc, Relation>::FilterArc( const Arc &arc, const Element &src_element, const Element &dest_element, LabelMap *label_map) const { bool added = false; if (label_map->empty()) InitLabelMap(label_map); // Adds element to state tuple if element state is related to tuple head. for (auto liter = label_map->lower_bound(arc.ilabel); liter != label_map->end() && liter->first == arc.ilabel; ++liter) { auto *dest_tuple = liter->second.dest_tuple; const auto dest_head = dest_tuple->filter_state.GetState(); if ((*r_)(dest_element.state_id, dest_head)) { dest_tuple->subset.push_front(dest_element); added = true; } } return added; } template <class Arc, class Relation> void RelationDeterminizeFilter<Arc, Relation>::InitLabelMap( LabelMap *label_map) const { const auto src_head = tuple_->filter_state.GetState(); Label label = kNoLabel; StateId nextstate = kNoStateId; for (ArcIterator<Fst<Arc>> aiter(*fst_, src_head); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); // Continues if multiarc. if (arc.ilabel == label && arc.nextstate == nextstate) continue; DeterminizeArc<StateTuple> det_arc(arc); det_arc.dest_tuple->filter_state = FilterState(arc.nextstate); label_map->insert(std::make_pair(arc.ilabel, det_arc)); label = arc.ilabel; nextstate = arc.nextstate; } } // Helper class to disambiguate an FST via Disambiguate(). template <class Arc> class Disambiguator { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; // IDs arcs with state ID and arc position. Arc position -1 indicates final // (super-final transition). using ArcId = std::pair<StateId, std::ptrdiff_t>; Disambiguator() : error_(false) {} void Disambiguate( const Fst<Arc> &ifst, MutableFst<Arc> *ofst, const DisambiguateOptions<Arc> &opts = DisambiguateOptions<Arc>()) { VectorFst<Arc> sfst(ifst); Connect(&sfst); ArcSort(&sfst, ArcCompare()); PreDisambiguate(sfst, ofst, opts); ArcSort(ofst, ArcCompare()); FindAmbiguities(*ofst); RemoveSplits(ofst); MarkAmbiguities(); RemoveAmbiguities(ofst); if (error_) ofst->SetProperties(kError, kError); } private: // Comparison functor for comparing input labels and next states of arcs. This // sort order facilitates the predisambiguation. 
class ArcCompare { public: bool operator()(const Arc &arc1, const Arc &arc2) const { return arc1.ilabel < arc2.ilabel || (arc1.ilabel == arc2.ilabel && arc1.nextstate < arc2.nextstate); } uint64_t Properties(uint64_t props) const { return (props & kArcSortProperties) | kILabelSorted | (props & kAcceptor ? kOLabelSorted : 0); } }; // Comparison functor for comparing transitions represented by their arc ID. // This sort order facilitates ambiguity detection. class ArcIdCompare { public: explicit ArcIdCompare(const std::vector<StateId> &head) : head_(head) {} bool operator()(const ArcId &a1, const ArcId &a2) const { // Sort first by source head state... const auto src1 = a1.first; const auto src2 = a2.first; const auto head1 = head_[src1]; const auto head2 = head_[src2]; if (head1 < head2) return true; if (head2 < head1) return false; // ...then by source state... if (src1 < src2) return true; if (src2 < src1) return false; // ...then by position. return a1.second < a2.second; } private: const std::vector<StateId> &head_; }; // A relation that determines if two states share a common future. class CommonFuture { public: using StateTable = GenericComposeStateTable<Arc, TrivialFilterState>; using StateTuple = typename StateTable::StateTuple; // Needed for compilation with DeterminizeRelationFilter. CommonFuture() { FSTERROR() << "Disambiguate::CommonFuture: FST not provided"; } explicit CommonFuture(const Fst<Arc> &ifst) { using M = Matcher<Fst<Arc>>; ComposeFstOptions<Arc, M, NullComposeFilter<M>> opts; // Ensures composition is between acceptors. const bool trans = ifst.Properties(kNotAcceptor, true); const auto *fsa = trans ? new ProjectFst<Arc>(ifst, PROJECT_INPUT) : &ifst; opts.state_table = new StateTable(*fsa, *fsa); const ComposeFst<Arc> cfst(*fsa, *fsa, opts); std::vector<bool> coaccess; uint64_t props = 0; SccVisitor<Arc> scc_visitor(nullptr, nullptr, &coaccess, &props); DfsVisit(cfst, &scc_visitor); for (StateId s = 0; s < coaccess.size(); ++s) { if (coaccess[s]) { related_.insert(opts.state_table->Tuple(s).StatePair()); } } if (trans) delete fsa; } bool operator()(const StateId s1, StateId s2) const { return related_.count(std::make_pair(s1, s2)) > 0; } private: // States s1 and s2 resp. are in this relation iff they there is a // path from s1 to a final state that has the same label as some // path from s2 to a final state. std::set<std::pair<StateId, StateId>> related_; }; using ArcIdMap = std::multimap<ArcId, ArcId, ArcIdCompare>; // Inserts candidate into the arc ID map. inline void InsertCandidate(StateId s1, StateId s2, const ArcId &a1, const ArcId &a2) { candidates_->insert(head_[s1] > head_[s2] ? std::make_pair(a1, a2) : std::make_pair(a2, a1)); } // Returns the arc corresponding to ArcId a. static Arc GetArc(const Fst<Arc> &fst, ArcId aid) { if (aid.second == -1) { // Returns super-final transition. return Arc(kNoLabel, kNoLabel, fst.Final(aid.first), kNoStateId); } else { ArcIterator<Fst<Arc>> aiter(fst, aid.first); aiter.Seek(aid.second); return aiter.Value(); } } // Outputs an equivalent FST whose states are subsets of states that have a // future path in common. void PreDisambiguate(const ExpandedFst<Arc> &ifst, MutableFst<Arc> *ofst, const DisambiguateOptions<Arc> &opts); // Finds transitions that are ambiguous candidates in the result of // PreDisambiguate. void FindAmbiguities(const ExpandedFst<Arc> &fst); // Finds transition pairs that are ambiguous candidates from two specified // source states. 
void FindAmbiguousPairs(const ExpandedFst<Arc> &fst, StateId s1, StateId s2); // Marks ambiguous transitions to be removed. void MarkAmbiguities(); // Deletes spurious ambiguous transitions (due to quantization). void RemoveSplits(MutableFst<Arc> *ofst); // Deletes actual ambiguous transitions. void RemoveAmbiguities(MutableFst<Arc> *ofst); // States s1 and s2 are in this relation iff there is a path from the initial // state to s1 that has the same label as some path from the initial state to // s2. We store only state pairs s1, s2 such that s1 <= s2. std::set<std::pair<StateId, StateId>> coreachable_; // Queue of disambiguation-related states to be processed. We store only // state pairs s1, s2 such that s1 <= s2. std::list<std::pair<StateId, StateId>> queue_; // Head state in the pre-disambiguation for a given state. std::vector<StateId> head_; // Maps from a candidate ambiguous arc A to each ambiguous candidate arc B // with the same label and destination state as A, whose source state s' is // coreachable with the source state s of A, and for which head(s') < head(s). std::unique_ptr<ArcIdMap> candidates_; // Set of ambiguous transitions to be removed. std::set<ArcId> ambiguous_; // States to merge due to quantization issues. std::unique_ptr<UnionFind<StateId>> merge_; // Marks error condition. bool error_; Disambiguator(const Disambiguator &) = delete; Disambiguator &operator=(const Disambiguator &) = delete; }; template <class Arc> void Disambiguator<Arc>::PreDisambiguate(const ExpandedFst<Arc> &ifst, MutableFst<Arc> *ofst, const DisambiguateOptions<Arc> &opts) { using CommonDivisor = DefaultCommonDivisor<Weight>; using Filter = RelationDeterminizeFilter<Arc, CommonFuture>; // Subset elements with states s1 and s2 (resp.) are in this relation iff they // there is a path from s1 to a final state that has the same label as some // path from s2 to a final state. auto *common_future = new CommonFuture(ifst); DeterminizeFstOptions<Arc, CommonDivisor, Filter> nopts; nopts.delta = opts.delta; nopts.subsequential_label = opts.subsequential_label; nopts.filter = new Filter(ifst, common_future, &head_); // The filter takes ownership of 'common_future', and determinization takes // ownership of the filter itself. nopts.gc_limit = 0; // Cache only the last state for fastest copy. 
if (opts.weight_threshold != Weight::Zero() || opts.state_threshold != kNoStateId) { /* TODO(riley): fails regression test; understand why if (ifst.Properties(kAcceptor, true)) { std::vector<Weight> idistance, odistance; ShortestDistance(ifst, &idistance, true); DeterminizeFst<Arc> dfst(ifst, &idistance, &odistance, nopts); PruneOptions< Arc, AnyArcFilter<Arc>> popts(opts.weight_threshold, opts.state_threshold, AnyArcFilter<Arc>(), &odistance); Prune(dfst, ofst, popts); } else */ { *ofst = DeterminizeFst<Arc>(ifst, nopts); Prune(ofst, opts.weight_threshold, opts.state_threshold); } } else { *ofst = DeterminizeFst<Arc>(ifst, nopts); } head_.resize(ofst->NumStates(), kNoStateId); } template <class Arc> void Disambiguator<Arc>::FindAmbiguities(const ExpandedFst<Arc> &fst) { if (fst.Start() == kNoStateId) return; candidates_.reset(new ArcIdMap(ArcIdCompare(head_))); const auto start_pr = std::make_pair(fst.Start(), fst.Start()); coreachable_.insert(start_pr); queue_.push_back(start_pr); while (!queue_.empty()) { const auto &pr = queue_.front(); const auto s1 = pr.first; const auto s2 = pr.second; queue_.pop_front(); FindAmbiguousPairs(fst, s1, s2); } } template <class Arc> void Disambiguator<Arc>::FindAmbiguousPairs(const ExpandedFst<Arc> &fst, StateId s1, StateId s2) { if (fst.NumArcs(s2) > fst.NumArcs(s1)) FindAmbiguousPairs(fst, s2, s1); SortedMatcher<Fst<Arc>> matcher(fst, MATCH_INPUT); matcher.SetState(s2); for (ArcIterator<Fst<Arc>> aiter(fst, s1); !aiter.Done(); aiter.Next()) { const auto &arc1 = aiter.Value(); const ArcId a1(s1, aiter.Position()); if (matcher.Find(arc1.ilabel)) { for (; !matcher.Done(); matcher.Next()) { const auto &arc2 = matcher.Value(); // Continues on implicit epsilon match. if (arc2.ilabel == kNoLabel) continue; const ArcId a2(s2, matcher.Position()); // Actual transition is ambiguous. if (s1 != s2 && arc1.nextstate == arc2.nextstate) { InsertCandidate(s1, s2, a1, a2); } const auto spr = arc1.nextstate <= arc2.nextstate ? std::make_pair(arc1.nextstate, arc2.nextstate) : std::make_pair(arc2.nextstate, arc1.nextstate); // Not already marked as coreachable? if (coreachable_.insert(spr).second) { // Only possible if state split by quantization issues. if (spr.first != spr.second && head_[spr.first] == head_[spr.second]) { if (!merge_) { merge_.reset(new UnionFind<StateId>(fst.NumStates(), kNoStateId)); merge_->MakeAllSet(fst.NumStates()); } merge_->Union(spr.first, spr.second); } else { queue_.push_back(spr); } } } } } // Super-final transition is ambiguous. if (s1 != s2 && fst.Final(s1) != Weight::Zero() && fst.Final(s2) != Weight::Zero()) { const ArcId a1(s1, -1); const ArcId a2(s2, -1); InsertCandidate(s1, s2, a1, a2); } } template <class Arc> void Disambiguator<Arc>::MarkAmbiguities() { if (!candidates_) return; for (auto it = candidates_->begin(); it != candidates_->end(); ++it) { const auto a = it->first; const auto b = it->second; // If b is not to be removed, then a is. if (ambiguous_.count(b) == 0) ambiguous_.insert(a); } coreachable_.clear(); candidates_.reset(); } template <class Arc> void Disambiguator<Arc>::RemoveSplits(MutableFst<Arc> *ofst) { if (!merge_) return; // Merges split states to remove spurious ambiguities. 
for (StateIterator<MutableFst<Arc>> siter(*ofst); !siter.Done(); siter.Next()) { for (MutableArcIterator<MutableFst<Arc>> aiter(ofst, siter.Value()); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); const auto nextstate = merge_->FindSet(arc.nextstate); if (nextstate != arc.nextstate) { arc.nextstate = nextstate; aiter.SetValue(arc); } } } // Repeats search for actual ambiguities on modified FST. coreachable_.clear(); merge_.reset(); candidates_.reset(); FindAmbiguities(*ofst); if (merge_) { // Shouldn't get here; sanity test. FSTERROR() << "Disambiguate: Unable to remove spurious ambiguities"; error_ = true; return; } } template <class Arc> void Disambiguator<Arc>::RemoveAmbiguities(MutableFst<Arc> *ofst) { if (ambiguous_.empty()) return; // Adds dead state to redirect ambiguous transitions to be removed. const auto dead = ofst->AddState(); for (auto it = ambiguous_.begin(); it != ambiguous_.end(); ++it) { const auto pos = it->second; if (pos >= 0) { // Actual transition. MutableArcIterator<MutableFst<Arc>> aiter(ofst, it->first); aiter.Seek(pos); auto arc = aiter.Value(); arc.nextstate = dead; aiter.SetValue(arc); } else { // Super-final transition. ofst->SetFinal(it->first, Weight::Zero()); } } Connect(ofst); ambiguous_.clear(); } } // namespace internal // Disambiguates a weighted FST. This version writes the disambiguated FST to an // output MutableFst. The result will be an equivalent FST that has the // property that there are not two distinct paths from the initial state to a // final state with the same input labeling. // // The weights must be (weakly) left divisible (valid for Tropical and // LogWeight). // // Complexity: // // Disambiguable: exponential (polynomial in the size of the output). // Non-disambiguable: does not terminate. // // The disambiguable transducers include all automata and functional transducers // that are unweighted or that are acyclic or that are unambiguous. // // For more information, see: // // Mohri, M. and Riley, M. 2015. On the disambiguation of weighted automata. // In CIAA, pages 263-278. template <class Arc> void Disambiguate( const Fst<Arc> &ifst, MutableFst<Arc> *ofst, const DisambiguateOptions<Arc> &opts = DisambiguateOptions<Arc>()) { internal::Disambiguator<Arc> disambiguator; disambiguator.Disambiguate(ifst, ofst, opts); } } // namespace fst #endif // FST_DISAMBIGUATE_H_
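The header above exposes a single entry point, fst::Disambiguate(). A minimal usage sketch (illustrative only; the file names and the delta value are placeholders, and a tropical-weight acceptor is assumed):

#include <memory>
#include <fst/fstlib.h>
#include <fst/disambiguate.h>

int main() {
  // Read a (possibly ambiguous) acceptor over the tropical semiring.
  std::unique_ptr<fst::StdVectorFst> input(fst::StdVectorFst::Read("in.fst"));
  if (!input) return 1;
  fst::StdVectorFst output;
  // After disambiguation, no two successful paths share the same input labeling.
  fst::DisambiguateOptions<fst::StdArc> opts(fst::kDelta);
  fst::Disambiguate(*input, &output, opts);
  return !output.Write("out.fst");
}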
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/script/compile-impl.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Class to to compile a binary FST from textual input. #ifndef FST_SCRIPT_COMPILE_IMPL_H_ #define FST_SCRIPT_COMPILE_IMPL_H_ #include <iostream> #include <memory> #include <sstream> #include <string> #include <vector> #include <fst/fst.h> #include <fst/util.h> #include <fst/vector-fst.h> #include <unordered_map> DECLARE_string(fst_field_separator); namespace fst { // Compile a binary Fst from textual input, helper class for fstcompile.cc // WARNING: Stand-alone use of this class not recommended, most code should // read/write using the binary format which is much more efficient. template <class Arc> class FstCompiler { public: using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; // WARNING: use of negative labels not recommended as it may cause conflicts. // If add_symbols_ is true, then the symbols will be dynamically added to the // symbol tables. This is only useful if you set the (i/o)keep flag to attach // the final symbol table, or use the accessors. (The input symbol tables are // const and therefore not changed.) FstCompiler(std::istream &istrm, const string &source, // NOLINT const SymbolTable *isyms, const SymbolTable *osyms, const SymbolTable *ssyms, bool accep, bool ikeep, bool okeep, bool nkeep, bool allow_negative_labels = false) { std::unique_ptr<SymbolTable> misyms(isyms ? isyms->Copy() : nullptr); std::unique_ptr<SymbolTable> mosyms(osyms ? osyms->Copy() : nullptr); std::unique_ptr<SymbolTable> mssyms(ssyms ? ssyms->Copy() : nullptr); Init(istrm, source, misyms.get(), mosyms.get(), mssyms.get(), accep, ikeep, okeep, nkeep, allow_negative_labels, false); } FstCompiler(std::istream &istrm, const string &source, // NOLINT SymbolTable *isyms, SymbolTable *osyms, SymbolTable *ssyms, bool accep, bool ikeep, bool okeep, bool nkeep, bool allow_negative_labels, bool add_symbols) { Init(istrm, source, isyms, osyms, ssyms, accep, ikeep, okeep, nkeep, allow_negative_labels, add_symbols); } void Init(std::istream &istrm, const string &source, // NOLINT SymbolTable *isyms, SymbolTable *osyms, SymbolTable *ssyms, bool accep, bool ikeep, bool okeep, bool nkeep, bool allow_negative_labels, bool add_symbols) { nline_ = 0; source_ = source; isyms_ = isyms; osyms_ = osyms; ssyms_ = ssyms; nstates_ = 0; keep_state_numbering_ = nkeep; allow_negative_labels_ = allow_negative_labels; add_symbols_ = add_symbols; bool start_state_populated = false; char line[kLineLen]; const string separator = FLAGS_fst_field_separator + "\n"; while (istrm.getline(line, kLineLen)) { ++nline_; std::vector<char *> col; SplitString(line, separator.c_str(), &col, true); if (col.empty() || col[0][0] == '\0') continue; if (col.size() > 5 || (col.size() > 4 && accep) || (col.size() == 3 && !accep)) { FSTERROR() << "FstCompiler: Bad number of columns, source = " << source_ << ", line = " << nline_; fst_.SetProperties(kError, kError); return; } StateId s = StrToStateId(col[0]); while (s >= fst_.NumStates()) fst_.AddState(); if (!start_state_populated) { fst_.SetStart(s); start_state_populated = true; } Arc arc; StateId d = s; switch (col.size()) { case 1: fst_.SetFinal(s, Weight::One()); break; case 2: fst_.SetFinal(s, StrToWeight(col[1], true)); break; case 3: arc.nextstate = d = StrToStateId(col[1]); arc.ilabel = StrToILabel(col[2]); arc.olabel = arc.ilabel; arc.weight = Weight::One(); fst_.AddArc(s, arc); break; case 4: arc.nextstate = d = 
StrToStateId(col[1]); arc.ilabel = StrToILabel(col[2]); if (accep) { arc.olabel = arc.ilabel; arc.weight = StrToWeight(col[3], true); } else { arc.olabel = StrToOLabel(col[3]); arc.weight = Weight::One(); } fst_.AddArc(s, arc); break; case 5: arc.nextstate = d = StrToStateId(col[1]); arc.ilabel = StrToILabel(col[2]); arc.olabel = StrToOLabel(col[3]); arc.weight = StrToWeight(col[4], true); fst_.AddArc(s, arc); } while (d >= fst_.NumStates()) fst_.AddState(); } if (ikeep) fst_.SetInputSymbols(isyms); if (okeep) fst_.SetOutputSymbols(osyms); } const VectorFst<Arc> &Fst() const { return fst_; } private: // Maximum line length in text file. static constexpr int kLineLen = 8096; StateId StrToId(const char *s, SymbolTable *syms, const char *name, bool allow_negative = false) const { StateId n = 0; if (syms) { n = (add_symbols_) ? syms->AddSymbol(s) : syms->Find(s); if (n == -1 || (!allow_negative && n < 0)) { FSTERROR() << "FstCompiler: Symbol \"" << s << "\" is not mapped to any integer " << name << ", symbol table = " << syms->Name() << ", source = " << source_ << ", line = " << nline_; fst_.SetProperties(kError, kError); } } else { char *p; n = strtoll(s, &p, 10); if (p < s + strlen(s) || (!allow_negative && n < 0)) { FSTERROR() << "FstCompiler: Bad " << name << " integer = \"" << s << "\", source = " << source_ << ", line = " << nline_; fst_.SetProperties(kError, kError); } } return n; } StateId StrToStateId(const char *s) { StateId n = StrToId(s, ssyms_, "state ID"); if (keep_state_numbering_) return n; // Remaps state IDs to make dense set. const auto it = states_.find(n); if (it == states_.end()) { states_[n] = nstates_; return nstates_++; } else { return it->second; } } StateId StrToILabel(const char *s) const { return StrToId(s, isyms_, "arc ilabel", allow_negative_labels_); } StateId StrToOLabel(const char *s) const { return StrToId(s, osyms_, "arc olabel", allow_negative_labels_); } Weight StrToWeight(const char *s, bool allow_zero) const { Weight w; std::istringstream strm(s); strm >> w; if (!strm || (!allow_zero && w == Weight::Zero())) { FSTERROR() << "FstCompiler: Bad weight = \"" << s << "\", source = " << source_ << ", line = " << nline_; fst_.SetProperties(kError, kError); w = Weight::NoWeight(); } return w; } mutable VectorFst<Arc> fst_; size_t nline_; string source_; // Text FST source name. SymbolTable *isyms_; // ilabel symbol table (not owned). SymbolTable *osyms_; // olabel symbol table (not owned). SymbolTable *ssyms_; // slabel symbol table (not owned). std::unordered_map<StateId, StateId> states_; // State ID map. StateId nstates_; // Number of seen states. bool keep_state_numbering_; bool allow_negative_labels_; // Not recommended; may cause conflicts. bool add_symbols_; // Add to symbol tables on-the fly. FstCompiler(const FstCompiler &) = delete; FstCompiler &operator=(const FstCompiler &) = delete; }; } // namespace fst #endif // FST_SCRIPT_COMPILE_IMPL_H_
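A small end-to-end sketch of the compiler class above (illustrative only, and subject to the header's own caveat that stand-alone use is not recommended): it compiles a two-line textual acceptor with integer labels, no symbol tables, and default options.

#include <sstream>
#include <fst/fstlib.h>
#include <fst/script/compile-impl.h>

int main() {
  // One arc from state 0 to state 1 with label 1 and weight 0.5; state 1 is final.
  std::istringstream text("0 1 1 0.5\n1\n");
  fst::FstCompiler<fst::StdArc> compiler(
      text, "<string>", /*isyms=*/nullptr, /*osyms=*/nullptr, /*ssyms=*/nullptr,
      /*accep=*/true, /*ikeep=*/false, /*okeep=*/false, /*nkeep=*/false);
  const fst::VectorFst<fst::StdArc> &compiled = compiler.Fst();
  return !compiled.Write("compiled.fst");
}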
0
coqui_public_repos/xtts-streaming-server
coqui_public_repos/xtts-streaming-server/server/requirements.txt
TTS @ git+https://github.com/coqui-ai/TTS@fa28f99f1508b5b5366539b2149963edcb80ba62
uvicorn[standard]==0.23.2
fastapi==0.95.2
deepspeed==0.10.3
pydantic==1.10.13
python-multipart==0.0.6
typing-extensions>=4.8.0
numpy==1.24.3
cutlet
mecab-python3==1.0.6
unidic-lite==1.0.8
unidic==1.1.0
0
coqui_public_repos/STT
coqui_public_repos/STT/bin/import_swc.py
#!/usr/bin/env python """ Downloads and prepares (parts of) the "Spoken Wikipedia Corpora" for train.py Use "python3 import_swc.py -h" for help """ import argparse import csv import os import random import re import shutil import sys import tarfile import unicodedata import wave import xml.etree.ElementTree as ET from collections import Counter from glob import glob from multiprocessing.pool import ThreadPool import progressbar import sox from coqui_stt_ctcdecoder import Alphabet from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download from coqui_stt_training.util.importers import validate_label_eng as validate_label SWC_URL = "https://www2.informatik.uni-hamburg.de/nats/pub/SWC/SWC_{language}.tar" SWC_ARCHIVE = "SWC_{language}.tar" LANGUAGES = ["dutch", "english", "german"] FIELDNAMES = ["wav_filename", "wav_filesize", "transcript"] FIELDNAMES_EXT = FIELDNAMES + ["article", "speaker"] CHANNELS = 1 SAMPLE_RATE = 16000 UNKNOWN = "<unknown>" AUDIO_PATTERN = "audio*.ogg" WAV_NAME = "audio.wav" ALIGNED_NAME = "aligned.swc" SUBSTITUTIONS = { "german": [ (re.compile(r"\$"), "dollar"), (re.compile(r"€"), "euro"), (re.compile(r"£"), "pfund"), ( re.compile(r"ein tausend ([^\s]+) hundert ([^\s]+) er( |$)"), r"\1zehnhundert \2er ", ), (re.compile(r"ein tausend (acht|neun) hundert"), r"\1zehnhundert"), ( re.compile( r"eins punkt null null null punkt null null null punkt null null null" ), "eine milliarde", ), ( re.compile( r"punkt null null null punkt null null null punkt null null null" ), "milliarden", ), (re.compile(r"eins punkt null null null punkt null null null"), "eine million"), (re.compile(r"punkt null null null punkt null null null"), "millionen"), (re.compile(r"eins punkt null null null"), "ein tausend"), (re.compile(r"punkt null null null"), "tausend"), (re.compile(r"punkt null"), None), ] } DONT_NORMALIZE = {"german": "ÄÖÜäöüß"} PRE_FILTER = str.maketrans(dict.fromkeys("/()[]{}<>:")) class Sample: def __init__(self, wav_path, start, end, text, article, speaker, sub_set=None): self.wav_path = wav_path self.start = start self.end = end self.text = text self.article = article self.speaker = speaker self.sub_set = sub_set def fail(message): print(message) sys.exit(1) def group(lst, get_key): groups = {} for obj in lst: key = get_key(obj) if key in groups: groups[key].append(obj) else: groups[key] = [obj] return groups def get_sample_size(population_size): margin_of_error = 0.01 fraction_picking = 0.50 z_score = 2.58 # Corresponds to confidence level 99% numerator = (z_score**2 * fraction_picking * (1 - fraction_picking)) / ( margin_of_error**2 ) sample_size = 0 for train_size in range(population_size, 0, -1): denominator = 1 + (z_score**2 * fraction_picking * (1 - fraction_picking)) / ( margin_of_error**2 * train_size ) sample_size = int(numerator / denominator) if 2 * sample_size + train_size <= population_size: break return sample_size def maybe_download_language(language): lang_upper = language[0].upper() + language[1:] return maybe_download( SWC_ARCHIVE.format(language=lang_upper), CLI_ARGS.base_dir, SWC_URL.format(language=lang_upper), ) def maybe_extract(data_dir, extracted_data, archive): extracted = os.path.join(data_dir, extracted_data) if os.path.isdir(extracted): print('Found directory "{}" - not extracting.'.format(extracted)) else: print('Extracting "{}"...'.format(archive)) with tarfile.open(archive) as tar: members = tar.getmembers() bar = progressbar.ProgressBar(max_value=len(members), widgets=SIMPLE_BAR) for member in bar(members): tar.extract(member=member, 
path=extracted) return extracted def ignored(node): if node is None: return False if node.tag == "ignored": return True return ignored(node.find("..")) def read_token(token): texts, start, end = [], None, None notes = token.findall("n") if len(notes) > 0: for note in notes: attributes = note.attrib if start is None and "start" in attributes: start = int(attributes["start"]) if "end" in attributes: token_end = int(attributes["end"]) if end is None or token_end > end: end = token_end if "pronunciation" in attributes: t = attributes["pronunciation"] texts.append(t) elif "text" in token.attrib: texts.append(token.attrib["text"]) return start, end, " ".join(texts) def in_alphabet(alphabet, c): return alphabet.CanEncode(c) if alphabet else True ALPHABETS = {} def get_alphabet(language): if language in ALPHABETS: return ALPHABETS[language] alphabet_path = getattr(CLI_ARGS, language + "_alphabet") alphabet = Alphabet(alphabet_path) if alphabet_path else None ALPHABETS[language] = alphabet return alphabet def label_filter(label, language): label = label.translate(PRE_FILTER) label = validate_label(label) if label is None: return None, "validation" substitutions = SUBSTITUTIONS[language] if language in SUBSTITUTIONS else [] for pattern, replacement in substitutions: if replacement is None: if pattern.match(label): return None, "substitution rule" else: label = pattern.sub(replacement, label) chars = [] dont_normalize = DONT_NORMALIZE[language] if language in DONT_NORMALIZE else "" alphabet = get_alphabet(language) for c in label: if ( CLI_ARGS.normalize and c not in dont_normalize and not in_alphabet(alphabet, c) ): c = ( unicodedata.normalize("NFKD", c) .encode("ascii", "ignore") .decode("ascii", "ignore") ) for sc in c: if not in_alphabet(alphabet, sc): return None, "illegal character" chars.append(sc) label = "".join(chars) label = validate_label(label) return label, "validation" if label is None else None def collect_samples(base_dir, language): roots = [] for root, _, files in os.walk(base_dir): if ALIGNED_NAME in files and WAV_NAME in files: roots.append(root) samples = [] reasons = Counter() def add_sample( p_wav_path, p_article, p_speaker, p_start, p_end, p_text, p_reason="complete" ): if p_start is not None and p_end is not None and p_text is not None: duration = p_end - p_start text, filter_reason = label_filter(p_text, language) skip = False if filter_reason is not None: skip = True p_reason = filter_reason elif CLI_ARGS.exclude_unknown_speakers and p_speaker == UNKNOWN: skip = True p_reason = "unknown speaker" elif CLI_ARGS.exclude_unknown_articles and p_article == UNKNOWN: skip = True p_reason = "unknown article" elif duration > CLI_ARGS.max_duration > 0 and CLI_ARGS.ignore_too_long: skip = True p_reason = "exceeded duration" elif int(duration / 30) < len(text): skip = True p_reason = "too short to decode" elif duration / len(text) < 10: skip = True p_reason = "length duration ratio" if skip: reasons[p_reason] += 1 else: samples.append( Sample(p_wav_path, p_start, p_end, text, p_article, p_speaker) ) elif p_start is None or p_end is None: reasons["missing timestamps"] += 1 else: reasons["missing text"] += 1 print("Collecting samples...") bar = progressbar.ProgressBar(max_value=len(roots), widgets=SIMPLE_BAR) for root in bar(roots): wav_path = os.path.join(root, WAV_NAME) aligned = ET.parse(os.path.join(root, ALIGNED_NAME)) article = UNKNOWN speaker = UNKNOWN for prop in aligned.iter("prop"): attributes = prop.attrib if "key" in attributes and "value" in attributes: if attributes["key"] 
== "DC.identifier": article = attributes["value"] elif attributes["key"] == "reader.name": speaker = attributes["value"] for sentence in aligned.iter("s"): if ignored(sentence): continue split = False tokens = list(map(read_token, sentence.findall("t"))) sample_start, sample_end, token_texts, sample_texts = None, None, [], [] for token_start, token_end, token_text in tokens: if CLI_ARGS.exclude_numbers and any(c.isdigit() for c in token_text): add_sample( wav_path, article, speaker, sample_start, sample_end, " ".join(sample_texts), p_reason="has numbers", ) sample_start, sample_end, token_texts, sample_texts = ( None, None, [], [], ) continue if sample_start is None: sample_start = token_start if sample_start is None: continue token_texts.append(token_text) if token_end is not None: if ( token_start != sample_start and token_end - sample_start > CLI_ARGS.max_duration > 0 ): add_sample( wav_path, article, speaker, sample_start, sample_end, " ".join(sample_texts), p_reason="split", ) sample_start = sample_end sample_texts = [] split = True sample_end = token_end sample_texts.extend(token_texts) token_texts = [] add_sample( wav_path, article, speaker, sample_start, sample_end, " ".join(sample_texts), p_reason="split" if split else "complete", ) print("Skipped samples:") for reason, n in reasons.most_common(): print(" - {}: {}".format(reason, n)) return samples def maybe_convert_one_to_wav(entry): root, _, files = entry transformer = sox.Transformer() transformer.convert(samplerate=SAMPLE_RATE, n_channels=CHANNELS) combiner = sox.Combiner() combiner.convert(samplerate=SAMPLE_RATE, n_channels=CHANNELS) output_wav = os.path.join(root, WAV_NAME) if os.path.isfile(output_wav): return files = sorted(glob(os.path.join(root, AUDIO_PATTERN))) try: if len(files) == 1: transformer.build(files[0], output_wav) elif len(files) > 1: wav_files = [] for i, file in enumerate(files): wav_path = os.path.join(root, "audio{}.wav".format(i)) transformer.build(file, wav_path) wav_files.append(wav_path) combiner.set_input_format(file_type=["wav"] * len(wav_files)) combiner.build(wav_files, output_wav, "concatenate") except sox.core.SoxError: return def maybe_convert_to_wav(base_dir): roots = list(os.walk(base_dir)) print("Converting and joining source audio files...") bar = progressbar.ProgressBar(max_value=len(roots), widgets=SIMPLE_BAR) tp = ThreadPool() for _ in bar(tp.imap_unordered(maybe_convert_one_to_wav, roots)): pass tp.close() tp.join() def assign_sub_sets(samples): sample_size = get_sample_size(len(samples)) speakers = group(samples, lambda sample: sample.speaker).values() speakers = list(sorted(speakers, key=len)) sample_sets = [[], []] while any(map(lambda s: len(s) < sample_size, sample_sets)) and len(speakers) > 0: for sample_set in sample_sets: if len(sample_set) < sample_size and len(speakers) > 0: sample_set.extend(speakers.pop(0)) train_set = sum(speakers, []) if len(train_set) == 0: print( "WARNING: Unable to build dev and test sets without speaker bias as there is no speaker meta data" ) random.seed(42) # same source data == same output random.shuffle(samples) for index, sample in enumerate(samples): if index < sample_size: sample.sub_set = "dev" elif index < 2 * sample_size: sample.sub_set = "test" else: sample.sub_set = "train" else: for sub_set, sub_set_samples in [ ("train", train_set), ("dev", sample_sets[0]), ("test", sample_sets[1]), ]: for sample in sub_set_samples: sample.sub_set = sub_set for sub_set, sub_set_samples in group(samples, lambda s: s.sub_set).items(): t = sum(map(lambda s: 
s.end - s.start, sub_set_samples)) / (1000 * 60 * 60) print( 'Sub-set "{}" with {} samples (duration: {:.2f} h)'.format( sub_set, len(sub_set_samples), t ) ) def create_sample_dirs(language): print("Creating sample directories...") for set_name in ["train", "dev", "test"]: dir_path = os.path.join(CLI_ARGS.base_dir, language + "-" + set_name) if not os.path.isdir(dir_path): os.mkdir(dir_path) def split_audio_files(samples, language): print("Splitting audio files...") sub_sets = Counter() src_wav_files = group(samples, lambda s: s.wav_path).items() bar = progressbar.ProgressBar(max_value=len(src_wav_files), widgets=SIMPLE_BAR) for wav_path, file_samples in bar(src_wav_files): file_samples = sorted(file_samples, key=lambda s: s.start) with wave.open(wav_path, "r") as src_wav_file: rate = src_wav_file.getframerate() for sample in file_samples: index = sub_sets[sample.sub_set] sample_wav_path = os.path.join( CLI_ARGS.base_dir, language + "-" + sample.sub_set, "sample-{0:06d}.wav".format(index), ) sample.wav_path = sample_wav_path sub_sets[sample.sub_set] += 1 src_wav_file.setpos(int(sample.start * rate / 1000.0)) data = src_wav_file.readframes( int((sample.end - sample.start) * rate / 1000.0) ) with wave.open(sample_wav_path, "w") as sample_wav_file: sample_wav_file.setnchannels(src_wav_file.getnchannels()) sample_wav_file.setsampwidth(src_wav_file.getsampwidth()) sample_wav_file.setframerate(rate) sample_wav_file.writeframes(data) def write_csvs(samples, language): for sub_set, set_samples in group(samples, lambda s: s.sub_set).items(): set_samples = sorted(set_samples, key=lambda s: s.wav_path) base_dir = os.path.abspath(CLI_ARGS.base_dir) csv_path = os.path.join(base_dir, language + "-" + sub_set + ".csv") print('Writing "{}"...'.format(csv_path)) with open(csv_path, "w", encoding="utf-8", newline="") as csv_file: writer = csv.DictWriter( csv_file, fieldnames=FIELDNAMES_EXT if CLI_ARGS.add_meta else FIELDNAMES ) writer.writeheader() bar = progressbar.ProgressBar( max_value=len(set_samples), widgets=SIMPLE_BAR ) for sample in bar(set_samples): row = { "wav_filename": os.path.relpath(sample.wav_path, base_dir), "wav_filesize": os.path.getsize(sample.wav_path), "transcript": sample.text, } if CLI_ARGS.add_meta: row["article"] = sample.article row["speaker"] = sample.speaker writer.writerow(row) def cleanup(archive, language): if not CLI_ARGS.keep_archive: print('Removing archive "{}"...'.format(archive)) os.remove(archive) language_dir = os.path.join(CLI_ARGS.base_dir, language) if not CLI_ARGS.keep_intermediate and os.path.isdir(language_dir): print('Removing intermediate files in "{}"...'.format(language_dir)) shutil.rmtree(language_dir) def prepare_language(language): archive = maybe_download_language(language) extracted = maybe_extract(CLI_ARGS.base_dir, language, archive) maybe_convert_to_wav(extracted) samples = collect_samples(extracted, language) assign_sub_sets(samples) create_sample_dirs(language) split_audio_files(samples, language) write_csvs(samples, language) cleanup(archive, language) def handle_args(): parser = argparse.ArgumentParser(description="Import Spoken Wikipedia Corpora") parser.add_argument("base_dir", help="Directory containing all data") parser.add_argument( "--language", default="all", help="One of (all|{})".format("|".join(LANGUAGES)) ) parser.add_argument( "--exclude_numbers", type=bool, default=True, help="If sequences with non-transliterated numbers should be excluded", ) parser.add_argument( "--max_duration", type=int, default=10000, help="Maximum sample 
duration in milliseconds", ) parser.add_argument( "--ignore_too_long", type=bool, default=False, help="If samples exceeding max_duration should be removed", ) parser.add_argument( "--normalize", action="store_true", help="Converts diacritic characters to their base ones", ) for language in LANGUAGES: parser.add_argument( "--{}_alphabet".format(language), help="Exclude {} samples with characters not in provided alphabet file".format( language ), ) parser.add_argument( "--add_meta", action="store_true", help="Adds article and speaker CSV columns" ) parser.add_argument( "--exclude_unknown_speakers", action="store_true", help="Exclude unknown speakers", ) parser.add_argument( "--exclude_unknown_articles", action="store_true", help="Exclude unknown articles", ) parser.add_argument( "--keep_archive", type=bool, default=True, help="If downloaded archives should be kept", ) parser.add_argument( "--keep_intermediate", type=bool, default=False, help="If intermediate files should be kept", ) return parser.parse_args() if __name__ == "__main__": CLI_ARGS = handle_args() if CLI_ARGS.language == "all": for lang in LANGUAGES: prepare_language(lang) elif CLI_ARGS.language in LANGUAGES: prepare_language(CLI_ARGS.language) else: fail("Wrong language id")
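The dev/test split size computed by get_sample_size() above is the standard sample-size estimate with a finite-population correction, restated here from the code in math form (z = 2.58 for 99% confidence, p = 0.5, e = 0.01):

n \;=\; \frac{z^{2}\,p(1-p)/e^{2}}{1 + \dfrac{z^{2}\,p(1-p)}{e^{2}\,N_{\mathrm{train}}}}

and the loop keeps the largest N_train satisfying 2n + N_train <= N_total, so that both dev and test receive n samples each.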
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstconvert-main.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Converts an FST to another type.

#include <cstring>

#include <memory>
#include <string>

#include <fst/flags.h>
#include <fst/script/convert.h>

DECLARE_string(fst_type);

int fstconvert_main(int argc, char **argv) {
  namespace s = fst::script;
  using fst::script::FstClass;

  string usage = "Converts an FST to another type.\n\n Usage: ";
  usage += argv[0];
  usage += " [in.fst [out.fst]]\n";

  std::set_new_handler(FailedNewHandler);
  SET_FLAGS(usage.c_str(), &argc, &argv, true);
  if (argc > 3) {
    ShowUsage();
    return 1;
  }

  const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : "";
  const string out_name = argc > 2 ? argv[2] : "";

  std::unique_ptr<FstClass> ifst(FstClass::Read(in_name));
  if (!ifst) return 1;

  if (ifst->FstType() != FLAGS_fst_type) {
    std::unique_ptr<FstClass> ofst(s::Convert(*ifst, FLAGS_fst_type));
    if (!ofst) return 1;
    return !ofst->Write(out_name);
  } else {
    return !ifst->Write(out_name);
  }
}
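For reference, a stripped-down sketch of the same conversion done directly through the script-level API used above (the file names and target type are placeholders):

#include <memory>
#include <fst/script/fst-class.h>
#include <fst/script/convert.h>

int main() {
  namespace s = fst::script;
  // Read any arc type through the type-erased FstClass interface.
  std::unique_ptr<s::FstClass> ifst(s::FstClass::Read("in.fst"));
  if (!ifst) return 1;
  // Convert the internal representation, e.g. to the "const" FST type.
  std::unique_ptr<s::FstClass> ofst(s::Convert(*ifst, "const"));
  if (!ofst) return 1;
  return !ofst->Write("out.fst");
}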
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/const/const16-fst.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#include <fst/fst.h>
#include <fst/const-fst.h>

namespace fst {

static FstRegisterer<ConstFst<StdArc, uint16>>
    ConstFst_StdArc_uint16_registerer;
static FstRegisterer<ConstFst<LogArc, uint16>>
    ConstFst_LogArc_uint16_registerer;
static FstRegisterer<ConstFst<Log64Arc, uint16>>
    ConstFst_Log64Arc_uint16_registerer;

}  // namespace fst
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/connect.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Classes and functions to remove unsuccessful paths from an FST. #ifndef FST_CONNECT_H_ #define FST_CONNECT_H_ #include <vector> #include <fst/dfs-visit.h> #include <fst/mutable-fst.h> #include <fst/union-find.h> namespace fst { // Finds and returns connected components. Use with Visit(). template <class Arc> class CcVisitor { public: using Weight = typename Arc::Weight; using StateId = typename Arc::StateId; // cc[i]: connected component number for state i. explicit CcVisitor(std::vector<StateId> *cc) : comps_(new UnionFind<StateId>(0, kNoStateId)), cc_(cc), nstates_(0) {} // comps: connected components equiv classes. explicit CcVisitor(UnionFind<StateId> *comps) : comps_(comps), cc_(nullptr), nstates_(0) {} ~CcVisitor() { if (cc_) delete comps_; } void InitVisit(const Fst<Arc> &fst) {} bool InitState(StateId s, StateId root) { ++nstates_; if (comps_->FindSet(s) == kNoStateId) comps_->MakeSet(s); return true; } bool WhiteArc(StateId s, const Arc &arc) { comps_->MakeSet(arc.nextstate); comps_->Union(s, arc.nextstate); return true; } bool GreyArc(StateId s, const Arc &arc) { comps_->Union(s, arc.nextstate); return true; } bool BlackArc(StateId s, const Arc &arc) { comps_->Union(s, arc.nextstate); return true; } void FinishState(StateId s) {} void FinishVisit() { if (cc_) GetCcVector(cc_); } // Returns number of components. // cc[i]: connected component number for state i. int GetCcVector(std::vector<StateId> *cc) { cc->clear(); cc->resize(nstates_, kNoStateId); StateId ncomp = 0; for (StateId s = 0; s < nstates_; ++s) { const auto rep = comps_->FindSet(s); auto &comp = (*cc)[rep]; if (comp == kNoStateId) { comp = ncomp; ++ncomp; } (*cc)[s] = comp; } return ncomp; } private: UnionFind<StateId> *comps_; // Components. std::vector<StateId> *cc_; // State's cc number. StateId nstates_; // State count. }; // Finds and returns strongly-connected components, accessible and // coaccessible states and related properties. Uses Tarjan's single // DFS SCC algorithm (see Aho, et al, "Design and Analysis of Computer // Algorithms", 189pp). Use with DfsVisit(); template <class Arc> class SccVisitor { public: using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; // scc[i]: strongly-connected component number for state i. // SCC numbers will be in topological order for acyclic input. // access[i]: accessibility of state i. // coaccess[i]: coaccessibility of state i. // Any of above can be NULL. // props: related property bits (cyclicity, initial cyclicity, // accessibility, coaccessibility) set/cleared (o.w. unchanged). 
SccVisitor(std::vector<StateId> *scc, std::vector<bool> *access, std::vector<bool> *coaccess, uint64 *props) : scc_(scc), access_(access), coaccess_(coaccess), props_(props) {} explicit SccVisitor(uint64 *props) : scc_(nullptr), access_(nullptr), coaccess_(nullptr), props_(props) {} void InitVisit(const Fst<Arc> &fst); bool InitState(StateId s, StateId root); bool TreeArc(StateId s, const Arc &arc) { return true; } bool BackArc(StateId s, const Arc &arc) { const auto t = arc.nextstate; if ((*dfnumber_)[t] < (*lowlink_)[s]) (*lowlink_)[s] = (*dfnumber_)[t]; if ((*coaccess_)[t]) (*coaccess_)[s] = true; *props_ |= kCyclic; *props_ &= ~kAcyclic; if (t == start_) { *props_ |= kInitialCyclic; *props_ &= ~kInitialAcyclic; } return true; } bool ForwardOrCrossArc(StateId s, const Arc &arc) { const auto t = arc.nextstate; if ((*dfnumber_)[t] < (*dfnumber_)[s] /* cross edge */ && (*onstack_)[t] && (*dfnumber_)[t] < (*lowlink_)[s]) { (*lowlink_)[s] = (*dfnumber_)[t]; } if ((*coaccess_)[t]) (*coaccess_)[s] = true; return true; } // Last argument always ignored, but required by the interface. void FinishState(StateId state, StateId p, const Arc *); void FinishVisit() { // Numbers SCCs in topological order when acyclic. if (scc_) { for (StateId s = 0; s < scc_->size(); ++s) { (*scc_)[s] = nscc_ - 1 - (*scc_)[s]; } } if (coaccess_internal_) delete coaccess_; dfnumber_.reset(); lowlink_.reset(); onstack_.reset(); scc_stack_.reset(); } private: std::vector<StateId> *scc_; // State's scc number. std::vector<bool> *access_; // State's accessibility. std::vector<bool> *coaccess_; // State's coaccessibility. uint64 *props_; const Fst<Arc> *fst_; StateId start_; StateId nstates_; // State count. StateId nscc_; // SCC count. bool coaccess_internal_; std::unique_ptr<std::vector<StateId>> dfnumber_; // State discovery times. std::unique_ptr<std::vector<StateId>> lowlink_; // lowlink[state] == dfnumber[state] => SCC root std::unique_ptr<std::vector<bool>> onstack_; // Is a state on the SCC stack? std::unique_ptr<std::vector<StateId>> scc_stack_; // SCC stack, with random access. 
}; template <class Arc> inline void SccVisitor<Arc>::InitVisit(const Fst<Arc> &fst) { if (scc_) scc_->clear(); if (access_) access_->clear(); if (coaccess_) { coaccess_->clear(); coaccess_internal_ = false; } else { coaccess_ = new std::vector<bool>; coaccess_internal_ = true; } *props_ |= kAcyclic | kInitialAcyclic | kAccessible | kCoAccessible; *props_ &= ~(kCyclic | kInitialCyclic | kNotAccessible | kNotCoAccessible); fst_ = &fst; start_ = fst.Start(); nstates_ = 0; nscc_ = 0; dfnumber_.reset(new std::vector<StateId>()); lowlink_.reset(new std::vector<StateId>()); onstack_.reset(new std::vector<bool>()); scc_stack_.reset(new std::vector<StateId>()); } template <class Arc> inline bool SccVisitor<Arc>::InitState(StateId s, StateId root) { scc_stack_->push_back(s); while (dfnumber_->size() <= s) { if (scc_) scc_->push_back(-1); if (access_) access_->push_back(false); coaccess_->push_back(false); dfnumber_->push_back(-1); lowlink_->push_back(-1); onstack_->push_back(false); } (*dfnumber_)[s] = nstates_; (*lowlink_)[s] = nstates_; (*onstack_)[s] = true; if (root == start_) { if (access_) (*access_)[s] = true; } else { if (access_) (*access_)[s] = false; *props_ |= kNotAccessible; *props_ &= ~kAccessible; } ++nstates_; return true; } template <class Arc> inline void SccVisitor<Arc>::FinishState(StateId s, StateId p, const Arc *) { if (fst_->Final(s) != Weight::Zero()) (*coaccess_)[s] = true; if ((*dfnumber_)[s] == (*lowlink_)[s]) { // Root of new SCC. bool scc_coaccess = false; auto i = scc_stack_->size(); StateId t; do { t = (*scc_stack_)[--i]; if ((*coaccess_)[t]) scc_coaccess = true; } while (s != t); do { t = scc_stack_->back(); if (scc_) (*scc_)[t] = nscc_; if (scc_coaccess) (*coaccess_)[t] = true; (*onstack_)[t] = false; scc_stack_->pop_back(); } while (s != t); if (!scc_coaccess) { *props_ |= kNotCoAccessible; *props_ &= ~kCoAccessible; } ++nscc_; } if (p != kNoStateId) { if ((*coaccess_)[s]) (*coaccess_)[p] = true; if ((*lowlink_)[s] < (*lowlink_)[p]) (*lowlink_)[p] = (*lowlink_)[s]; } } // Trims an FST, removing states and arcs that are not on successful paths. // This version modifies its input. // // Complexity: // // Time: O(V + E) // Space: O(V + E) // // where V = # of states and E = # of arcs. template <class Arc> void Connect(MutableFst<Arc> *fst) { using StateId = typename Arc::StateId; std::vector<bool> access; std::vector<bool> coaccess; uint64 props = 0; SccVisitor<Arc> scc_visitor(nullptr, &access, &coaccess, &props); DfsVisit(*fst, &scc_visitor); std::vector<StateId> dstates; for (StateId s = 0; s < access.size(); ++s) { if (!access[s] || !coaccess[s]) dstates.push_back(s); } fst->DeleteStates(dstates); fst->SetProperties(kAccessible | kCoAccessible, kAccessible | kCoAccessible); } // Returns an acyclic FST where each SCC in the input FST has been condensed to // a single state with transitions between SCCs retained and within SCCs // dropped. Also populates 'scc' with a mapping from input to output states. 
template <class Arc> void Condense(const Fst<Arc> &ifst, MutableFst<Arc> *ofst, std::vector<typename Arc::StateId> *scc) { using StateId = typename Arc::StateId; ofst->DeleteStates(); uint64 props = 0; SccVisitor<Arc> scc_visitor(scc, nullptr, nullptr, &props); DfsVisit(ifst, &scc_visitor); for (StateId s = 0; s < scc->size(); ++s) { const auto c = (*scc)[s]; while (c >= ofst->NumStates()) ofst->AddState(); if (s == ifst.Start()) ofst->SetStart(c); const auto weight = ifst.Final(s); if (weight != Arc::Weight::Zero()) ofst->SetFinal(c, Plus(ofst->Final(c), weight)); for (ArcIterator<Fst<Arc>> aiter(ifst, s); !aiter.Done(); aiter.Next()) { auto arc = aiter.Value(); const auto nextc = (*scc)[arc.nextstate]; if (nextc != c) { while (nextc >= ofst->NumStates()) ofst->AddState(); arc.nextstate = nextc; ofst->AddArc(c, arc); } } } ofst->SetProperties(kAcyclic | kInitialAcyclic, kAcyclic | kInitialAcyclic); } } // namespace fst #endif // FST_CONNECT_H_
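A short usage sketch for the two functions above (illustrative only; the input file name is a placeholder): Connect() trims the machine in place, and Condense() builds the acyclic SCC condensation along with the state-to-component map.

#include <memory>
#include <vector>
#include <fst/fstlib.h>

int main() {
  std::unique_ptr<fst::StdVectorFst> fst(fst::StdVectorFst::Read("in.fst"));
  if (!fst) return 1;
  // Remove states that are not both accessible and coaccessible.
  fst::Connect(fst.get());
  // Collapse each strongly connected component into a single state.
  fst::StdVectorFst condensed;
  std::vector<fst::StdArc::StateId> scc;  // scc[s]: component of input state s.
  fst::Condense(*fst, &condensed, &scc);
  return !condensed.Write("condensed.fst");
}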
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/far/script-impl.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Definitions and functions for invoking and using Far main functions that
// support multiple and extensible arc types.

#include <fst/extensions/far/script-impl.h>

#include <string>

#include <fst/extensions/far/far.h>
#include <fstream>

namespace fst {
namespace script {

string LoadArcTypeFromFar(const string &far_fname) {
  FarHeader hdr;
  if (!hdr.Read(far_fname)) {
    LOG(ERROR) << "Error reading FAR: " << far_fname;
    return "";
  }
  string atype = hdr.ArcType();
  if (atype == "unknown") {
    LOG(ERROR) << "Empty FST archive: " << far_fname;
    return "";
  }
  return atype;
}

string LoadArcTypeFromFst(const string &fst_fname) {
  FstHeader hdr;
  std::ifstream in(fst_fname, std::ios_base::in | std::ios_base::binary);
  if (!hdr.Read(in, fst_fname)) {
    LOG(ERROR) << "Error reading FST: " << fst_fname;
    return "";
  }
  return hdr.ArcType();
}

}  // namespace script
}  // namespace fst
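A hedged sketch of how these helpers might be called (the file names are placeholders); both return an empty string on failure, so a caller should check for that:

#include <iostream>
#include <string>
#include <fst/extensions/far/script-impl.h>

int main() {
  const std::string far_arc_type = fst::script::LoadArcTypeFromFar("corpus.far");
  const std::string fst_arc_type = fst::script::LoadArcTypeFromFst("model.fst");
  if (far_arc_type.empty() || fst_arc_type.empty()) return 1;
  std::cout << far_arc_type << " " << fst_arc_type << std::endl;
  return 0;
}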
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/tts_tests2/test_align_tts_train.py
import glob import json import os import shutil from trainer import get_last_checkpoint from tests import get_device_id, get_tests_output_path, run_cli from TTS.tts.configs.align_tts_config import AlignTTSConfig config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") config = AlignTTSConfig( batch_size=8, eval_batch_size=8, num_loader_workers=0, num_eval_loader_workers=0, text_cleaner="english_cleaners", use_phonemes=False, phoneme_language="en-us", phoneme_cache_path=os.path.join(get_tests_output_path(), "train_outputs/phoneme_cache/"), run_eval=True, test_delay_epochs=-1, epochs=1, print_step=1, print_eval=True, test_sentences=[ "Be a voice, not an echo.", ], ) config.audio.do_trim_silence = True config.audio.trim_db = 60 config.save_json(config_path) # train the model for one epoch command_train = ( f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} " f"--coqpit.output_path {output_path} " "--coqpit.datasets.0.formatter ljspeech " "--coqpit.datasets.0.meta_file_train metadata.csv " "--coqpit.datasets.0.meta_file_val metadata.csv " "--coqpit.datasets.0.path tests/data/ljspeech " "--coqpit.test_delay_epochs 0 " ) run_cli(command_train) # Find latest folder continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) # Inference using TTS API continue_config_path = os.path.join(continue_path, "config.json") continue_restore_path, _ = get_last_checkpoint(continue_path) out_wav_path = os.path.join(get_tests_output_path(), "output.wav") # Check integrity of the config with open(continue_config_path, "r", encoding="utf-8") as f: config_loaded = json.load(f) assert config_loaded["characters"] is not None assert config_loaded["output_path"] in continue_path assert config_loaded["test_delay_epochs"] == 0 # Load the model and run inference inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}" run_cli(inference_command) # restore the model and continue training for one more epoch command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} " run_cli(command_train) shutil.rmtree(continue_path)
0
coqui_public_repos/STT-examples/python_websocket_server
coqui_public_repos/STT-examples/python_websocket_server/stt_server/app.py
import json
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from time import perf_counter

from pyhocon import ConfigFactory
from sanic import Sanic, response
from sanic.log import logger

from stt_server.engine import SpeechToTextEngine
from stt_server.models import Response, Error

# Load app configs and initialize STT model
conf = ConfigFactory.parse_file("application.conf")
engine = SpeechToTextEngine(
    model_path=Path(conf["stt.model"]).absolute().as_posix(),
    scorer_path=Path(conf["stt.scorer"]).absolute().as_posix(),
)

# Initialize Sanic and ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=conf["server.threadpool.count"])
app = Sanic("stt_server")


@app.route("/", methods=["GET"])
async def healthcheck(_):
    return response.text("Welcome to STT Server!")


@app.websocket("/api/v1/stt")
async def stt(request, ws):
    logger.debug(f"Received {request.method} request at {request.path}")
    try:
        audio = await ws.recv()
        inference_start = perf_counter()
        text = await app.loop.run_in_executor(executor, lambda: engine.run(audio))
        inference_end = perf_counter() - inference_start
        await ws.send(json.dumps(Response(text, inference_end).__dict__))
        logger.debug(f"Completed {request.method} request at {request.path} in {inference_end} seconds")
    except Exception as e:  # pylint: disable=broad-except
        logger.debug(f"Failed to process {request.method} request at {request.path}. The exception is: {str(e)}.")
        await ws.send(json.dumps(Error("Something went wrong").__dict__))
        await ws.close()


if __name__ == "__main__":
    app.run(
        host=conf["server.http.host"],
        port=conf["server.http.port"],
        access_log=True,
        debug=True,
    )
0
coqui_public_repos/TTS/TTS/vocoder
coqui_public_repos/TTS/TTS/vocoder/layers/losses.py
from typing import Dict, Union import torch from torch import nn from torch.nn import functional as F from TTS.utils.audio.torch_transforms import TorchSTFT from TTS.vocoder.utils.distribution import discretized_mix_logistic_loss, gaussian_loss ################################# # GENERATOR LOSSES ################################# class STFTLoss(nn.Module): """STFT loss. Input generate and real waveforms are converted to spectrograms compared with L1 and Spectral convergence losses. It is from ParallelWaveGAN paper https://arxiv.org/pdf/1910.11480.pdf""" def __init__(self, n_fft, hop_length, win_length): super().__init__() self.n_fft = n_fft self.hop_length = hop_length self.win_length = win_length self.stft = TorchSTFT(n_fft, hop_length, win_length) def forward(self, y_hat, y): y_hat_M = self.stft(y_hat) y_M = self.stft(y) # magnitude loss loss_mag = F.l1_loss(torch.log(y_M), torch.log(y_hat_M)) # spectral convergence loss loss_sc = torch.norm(y_M - y_hat_M, p="fro") / torch.norm(y_M, p="fro") return loss_mag, loss_sc class MultiScaleSTFTLoss(torch.nn.Module): """Multi-scale STFT loss. Input generate and real waveforms are converted to spectrograms compared with L1 and Spectral convergence losses. It is from ParallelWaveGAN paper https://arxiv.org/pdf/1910.11480.pdf""" def __init__(self, n_ffts=(1024, 2048, 512), hop_lengths=(120, 240, 50), win_lengths=(600, 1200, 240)): super().__init__() self.loss_funcs = torch.nn.ModuleList() for n_fft, hop_length, win_length in zip(n_ffts, hop_lengths, win_lengths): self.loss_funcs.append(STFTLoss(n_fft, hop_length, win_length)) def forward(self, y_hat, y): N = len(self.loss_funcs) loss_sc = 0 loss_mag = 0 for f in self.loss_funcs: lm, lsc = f(y_hat, y) loss_mag += lm loss_sc += lsc loss_sc /= N loss_mag /= N return loss_mag, loss_sc class L1SpecLoss(nn.Module): """L1 Loss over Spectrograms as described in HiFiGAN paper https://arxiv.org/pdf/2010.05646.pdf""" def __init__( self, sample_rate, n_fft, hop_length, win_length, mel_fmin=None, mel_fmax=None, n_mels=None, use_mel=True ): super().__init__() self.use_mel = use_mel self.stft = TorchSTFT( n_fft, hop_length, win_length, sample_rate=sample_rate, mel_fmin=mel_fmin, mel_fmax=mel_fmax, n_mels=n_mels, use_mel=use_mel, ) def forward(self, y_hat, y): y_hat_M = self.stft(y_hat) y_M = self.stft(y) # magnitude loss loss_mag = F.l1_loss(torch.log(y_M), torch.log(y_hat_M)) return loss_mag class MultiScaleSubbandSTFTLoss(MultiScaleSTFTLoss): """Multiscale STFT loss for multi band model outputs. 
From MultiBand-MelGAN paper https://arxiv.org/abs/2005.05106""" # pylint: disable=no-self-use def forward(self, y_hat, y): y_hat = y_hat.view(-1, 1, y_hat.shape[2]) y = y.view(-1, 1, y.shape[2]) return super().forward(y_hat.squeeze(1), y.squeeze(1)) class MSEGLoss(nn.Module): """Mean Squared Generator Loss""" # pylint: disable=no-self-use def forward(self, score_real): loss_fake = F.mse_loss(score_real, score_real.new_ones(score_real.shape)) return loss_fake class HingeGLoss(nn.Module): """Hinge Discriminator Loss""" # pylint: disable=no-self-use def forward(self, score_real): # TODO: this might be wrong loss_fake = torch.mean(F.relu(1.0 - score_real)) return loss_fake ################################## # DISCRIMINATOR LOSSES ################################## class MSEDLoss(nn.Module): """Mean Squared Discriminator Loss""" def __init__( self, ): super().__init__() self.loss_func = nn.MSELoss() # pylint: disable=no-self-use def forward(self, score_fake, score_real): loss_real = self.loss_func(score_real, score_real.new_ones(score_real.shape)) loss_fake = self.loss_func(score_fake, score_fake.new_zeros(score_fake.shape)) loss_d = loss_real + loss_fake return loss_d, loss_real, loss_fake class HingeDLoss(nn.Module): """Hinge Discriminator Loss""" # pylint: disable=no-self-use def forward(self, score_fake, score_real): loss_real = torch.mean(F.relu(1.0 - score_real)) loss_fake = torch.mean(F.relu(1.0 + score_fake)) loss_d = loss_real + loss_fake return loss_d, loss_real, loss_fake class MelganFeatureLoss(nn.Module): def __init__( self, ): super().__init__() self.loss_func = nn.L1Loss() # pylint: disable=no-self-use def forward(self, fake_feats, real_feats): loss_feats = 0 num_feats = 0 for idx, _ in enumerate(fake_feats): for fake_feat, real_feat in zip(fake_feats[idx], real_feats[idx]): loss_feats += self.loss_func(fake_feat, real_feat) num_feats += 1 loss_feats = loss_feats / num_feats return loss_feats ##################################### # LOSS WRAPPERS ##################################### def _apply_G_adv_loss(scores_fake, loss_func): """Compute G adversarial loss function and normalize values""" adv_loss = 0 if isinstance(scores_fake, list): for score_fake in scores_fake: fake_loss = loss_func(score_fake) adv_loss += fake_loss adv_loss /= len(scores_fake) else: fake_loss = loss_func(scores_fake) adv_loss = fake_loss return adv_loss def _apply_D_loss(scores_fake, scores_real, loss_func): """Compute D loss func and normalize loss values""" loss = 0 real_loss = 0 fake_loss = 0 if isinstance(scores_fake, list): # multi-scale loss for score_fake, score_real in zip(scores_fake, scores_real): total_loss, real_loss_, fake_loss_ = loss_func(score_fake=score_fake, score_real=score_real) loss += total_loss real_loss += real_loss_ fake_loss += fake_loss_ # normalize loss values with number of scales (discriminators) loss /= len(scores_fake) real_loss /= len(scores_real) fake_loss /= len(scores_fake) else: # single scale loss total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real) loss = total_loss return loss, real_loss, fake_loss ################################## # MODEL LOSSES ################################## class GeneratorLoss(nn.Module): """Generator Loss Wrapper. Based on model configuration it sets a right set of loss functions and computes losses. It allows to experiment with different combinations of loss functions with different models by just changing configurations. Args: C (AttrDict): model configuration. 
""" def __init__(self, C): super().__init__() assert not ( C.use_mse_gan_loss and C.use_hinge_gan_loss ), " [!] Cannot use HingeGANLoss and MSEGANLoss together." self.use_stft_loss = C.use_stft_loss if "use_stft_loss" in C else False self.use_subband_stft_loss = C.use_subband_stft_loss if "use_subband_stft_loss" in C else False self.use_mse_gan_loss = C.use_mse_gan_loss if "use_mse_gan_loss" in C else False self.use_hinge_gan_loss = C.use_hinge_gan_loss if "use_hinge_gan_loss" in C else False self.use_feat_match_loss = C.use_feat_match_loss if "use_feat_match_loss" in C else False self.use_l1_spec_loss = C.use_l1_spec_loss if "use_l1_spec_loss" in C else False self.stft_loss_weight = C.stft_loss_weight if "stft_loss_weight" in C else 0.0 self.subband_stft_loss_weight = C.subband_stft_loss_weight if "subband_stft_loss_weight" in C else 0.0 self.mse_gan_loss_weight = C.mse_G_loss_weight if "mse_G_loss_weight" in C else 0.0 self.hinge_gan_loss_weight = C.hinge_G_loss_weight if "hinde_G_loss_weight" in C else 0.0 self.feat_match_loss_weight = C.feat_match_loss_weight if "feat_match_loss_weight" in C else 0.0 self.l1_spec_loss_weight = C.l1_spec_loss_weight if "l1_spec_loss_weight" in C else 0.0 if C.use_stft_loss: self.stft_loss = MultiScaleSTFTLoss(**C.stft_loss_params) if C.use_subband_stft_loss: self.subband_stft_loss = MultiScaleSubbandSTFTLoss(**C.subband_stft_loss_params) if C.use_mse_gan_loss: self.mse_loss = MSEGLoss() if C.use_hinge_gan_loss: self.hinge_loss = HingeGLoss() if C.use_feat_match_loss: self.feat_match_loss = MelganFeatureLoss() if C.use_l1_spec_loss: assert C.audio["sample_rate"] == C.l1_spec_loss_params["sample_rate"] self.l1_spec_loss = L1SpecLoss(**C.l1_spec_loss_params) def forward( self, y_hat=None, y=None, scores_fake=None, feats_fake=None, feats_real=None, y_hat_sub=None, y_sub=None ): gen_loss = 0 adv_loss = 0 return_dict = {} # STFT Loss if self.use_stft_loss: stft_loss_mg, stft_loss_sc = self.stft_loss(y_hat[:, :, : y.size(2)].squeeze(1), y.squeeze(1)) return_dict["G_stft_loss_mg"] = stft_loss_mg return_dict["G_stft_loss_sc"] = stft_loss_sc gen_loss = gen_loss + self.stft_loss_weight * (stft_loss_mg + stft_loss_sc) # L1 Spec loss if self.use_l1_spec_loss: l1_spec_loss = self.l1_spec_loss(y_hat, y) return_dict["G_l1_spec_loss"] = l1_spec_loss gen_loss = gen_loss + self.l1_spec_loss_weight * l1_spec_loss # subband STFT Loss if self.use_subband_stft_loss: subband_stft_loss_mg, subband_stft_loss_sc = self.subband_stft_loss(y_hat_sub, y_sub) return_dict["G_subband_stft_loss_mg"] = subband_stft_loss_mg return_dict["G_subband_stft_loss_sc"] = subband_stft_loss_sc gen_loss = gen_loss + self.subband_stft_loss_weight * (subband_stft_loss_mg + subband_stft_loss_sc) # multiscale MSE adversarial loss if self.use_mse_gan_loss and scores_fake is not None: mse_fake_loss = _apply_G_adv_loss(scores_fake, self.mse_loss) return_dict["G_mse_fake_loss"] = mse_fake_loss adv_loss = adv_loss + self.mse_gan_loss_weight * mse_fake_loss # multiscale Hinge adversarial loss if self.use_hinge_gan_loss and not scores_fake is not None: hinge_fake_loss = _apply_G_adv_loss(scores_fake, self.hinge_loss) return_dict["G_hinge_fake_loss"] = hinge_fake_loss adv_loss = adv_loss + self.hinge_gan_loss_weight * hinge_fake_loss # Feature Matching Loss if self.use_feat_match_loss and not feats_fake is None: feat_match_loss = self.feat_match_loss(feats_fake, feats_real) return_dict["G_feat_match_loss"] = feat_match_loss adv_loss = adv_loss + self.feat_match_loss_weight * feat_match_loss return_dict["loss"] = 
gen_loss + adv_loss return_dict["G_gen_loss"] = gen_loss return_dict["G_adv_loss"] = adv_loss return return_dict class DiscriminatorLoss(nn.Module): """Like ```GeneratorLoss```""" def __init__(self, C): super().__init__() assert not ( C.use_mse_gan_loss and C.use_hinge_gan_loss ), " [!] Cannot use HingeGANLoss and MSEGANLoss together." self.use_mse_gan_loss = C.use_mse_gan_loss self.use_hinge_gan_loss = C.use_hinge_gan_loss if C.use_mse_gan_loss: self.mse_loss = MSEDLoss() if C.use_hinge_gan_loss: self.hinge_loss = HingeDLoss() def forward(self, scores_fake, scores_real): loss = 0 return_dict = {} if self.use_mse_gan_loss: mse_D_loss, mse_D_real_loss, mse_D_fake_loss = _apply_D_loss( scores_fake=scores_fake, scores_real=scores_real, loss_func=self.mse_loss ) return_dict["D_mse_gan_loss"] = mse_D_loss return_dict["D_mse_gan_real_loss"] = mse_D_real_loss return_dict["D_mse_gan_fake_loss"] = mse_D_fake_loss loss += mse_D_loss if self.use_hinge_gan_loss: hinge_D_loss, hinge_D_real_loss, hinge_D_fake_loss = _apply_D_loss( scores_fake=scores_fake, scores_real=scores_real, loss_func=self.hinge_loss ) return_dict["D_hinge_gan_loss"] = hinge_D_loss return_dict["D_hinge_gan_real_loss"] = hinge_D_real_loss return_dict["D_hinge_gan_fake_loss"] = hinge_D_fake_loss loss += hinge_D_loss return_dict["loss"] = loss return return_dict class WaveRNNLoss(nn.Module): def __init__(self, wave_rnn_mode: Union[str, int]): super().__init__() if wave_rnn_mode == "mold": self.loss_func = discretized_mix_logistic_loss elif wave_rnn_mode == "gauss": self.loss_func = gaussian_loss elif isinstance(wave_rnn_mode, int): self.loss_func = torch.nn.CrossEntropyLoss() else: raise ValueError(" [!] Unknown mode for Wavernn.") def forward(self, y_hat, y) -> Dict: loss = self.loss_func(y_hat, y) return {"loss": loss}
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstconcat.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. int fstconcat_main(int argc, char **argv); int main(int argc, char **argv) { return fstconcat_main(argc, argv); }
0
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/uwp/STTUWP.sln
 Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.29519.87 MinimumVisualStudioVersion = 10.0.40219.1 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "STTUWP", "STTUWP\STTUWP.csproj", "{49AAC24D-6A76-4910-913A-94D2D67B6226}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|ARM = Debug|ARM Debug|ARM64 = Debug|ARM64 Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Release|ARM = Release|ARM Release|ARM64 = Release|ARM64 Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|ARM.ActiveCfg = Debug|ARM {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|ARM.Build.0 = Debug|ARM {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|ARM.Deploy.0 = Debug|ARM {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|ARM64.ActiveCfg = Debug|ARM64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|ARM64.Build.0 = Debug|ARM64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|ARM64.Deploy.0 = Debug|ARM64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|x64.ActiveCfg = Debug|x64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|x64.Build.0 = Debug|x64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|x64.Deploy.0 = Debug|x64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|x86.ActiveCfg = Debug|x86 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|x86.Build.0 = Debug|x86 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Debug|x86.Deploy.0 = Debug|x86 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|ARM.ActiveCfg = Release|ARM {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|ARM.Build.0 = Release|ARM {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|ARM.Deploy.0 = Release|ARM {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|ARM64.ActiveCfg = Release|ARM64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|ARM64.Build.0 = Release|ARM64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|ARM64.Deploy.0 = Release|ARM64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|x64.ActiveCfg = Release|x64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|x64.Build.0 = Release|x64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|x64.Deploy.0 = Release|x64 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|x86.ActiveCfg = Release|x86 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|x86.Build.0 = Release|x86 {49AAC24D-6A76-4910-913A-94D2D67B6226}.Release|x86.Deploy.0 = Release|x86 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {D6764C5D-937A-4FF8-AE1F-29EC004C905C} EndGlobalSection EndGlobal
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/extensions/linear/Makefile.am
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS) if HAVE_BIN bin_PROGRAMS = fstlinear fstloglinearapply LDADD = libfstlinearscript.la ../../script/libfstscript.la \ ../../lib/libfst.la -lm $(DL_LIBS) fstlinear_SOURCES = fstlinear.cc fstloglinearapply_SOURCES = fstloglinearapply.cc endif if HAVE_SCRIPT libfstlinearscript_la_SOURCES = linearscript.cc libfstlinearscript_la_LDFLAGS = -version-info 13:0:0 -lm $(DL_LIBS) libfstlinearscript_la_LIBADD = ../../script/libfstscript.la \ ../../lib/libfst.la -lm $(DL_LIBS) endif if HAVE_SCRIPT libfst_LTLIBRARIES = linear_tagger-fst.la \ linear_classifier-fst.la lib_LTLIBRARIES = libfstlinearscript.la else libfst_LTLIBRARIES = linear_tagger-fst.la linear_classifier-fst.la endif libfstdir = @libfstdir@ linear_tagger_fst_la_SOURCES = linear-tagger-fst.cc linear_tagger_fst_la_LDFLAGS = -module linear_classifier_fst_la_SOURCES = linear-classifier-fst.cc linear_classifier_fst_la_LDFLAGS = -module
0
coqui_public_repos/inference-engine/third_party/kenlm/lm
coqui_public_repos/inference-engine/third_party/kenlm/lm/interpolate/pipeline.hh
#ifndef LM_INTERPOLATE_PIPELINE_H
#define LM_INTERPOLATE_PIPELINE_H

#include "lm/common/model_buffer.hh"
#include "util/fixed_array.hh"
#include "util/stream/config.hh"

#include <cstddef>
#include <string>
#include <vector>

namespace lm { namespace interpolate {

struct Config {
  std::vector<float> lambdas;
  util::stream::SortConfig sort;
  std::size_t BufferSize() const { return sort.buffer_size; }
};

void Pipeline(util::FixedArray<ModelBuffer> &models, const Config &config, int write_file);

}} // namespaces

#endif // LM_INTERPOLATE_PIPELINE_H
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/script/text-io.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Utilities for reading and writing textual strings representing states, // labels, and weights and files specifying label-label pairs and potentials // (state-weight pairs). #ifndef FST_SCRIPT_TEXT_IO_H__ #define FST_SCRIPT_TEXT_IO_H__ #include <string> #include <vector> #include <fst/script/weight-class.h> namespace fst { namespace script { bool ReadPotentials(const string &weight_type, const string &filename, std::vector<WeightClass> *potentials); bool WritePotentials(const string &filename, const std::vector<WeightClass> &potentials); } // namespace script } // namespace fst #endif // FST_SCRIPT_TEXT_IO_H__
0
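The header above only declares ReadPotentials and WritePotentials, so a small, hedged usage sketch may help show how the two calls fit together. Everything beyond the two declared functions is an assumption made for illustration: the file name potentials.txt, the choice of the "tropical" weight type, and the WeightClass string constructor are not mandated by the header itself.

// Sketch only: round-trips a small potentials file through the declared helpers.
#include <iostream>
#include <vector>

#include <fst/script/text-io.h>
#include <fst/script/weight-class.h>

int main() {
  std::vector<fst::script::WeightClass> potentials;
  potentials.emplace_back("tropical", "0.5");   // potential for state 0
  potentials.emplace_back("tropical", "1.25");  // potential for state 1

  // Write one state-weight per line, then read the file back.
  if (!fst::script::WritePotentials("potentials.txt", potentials)) {
    std::cerr << "write failed" << std::endl;
    return 1;
  }
  std::vector<fst::script::WeightClass> read_back;
  if (!fst::script::ReadPotentials("tropical", "potentials.txt", &read_back)) {
    std::cerr << "read failed" << std::endl;
    return 1;
  }
  std::cout << "read " << read_back.size() << " potentials" << std::endl;
  return 0;
}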
coqui_public_repos/TTS/TTS/vocoder
coqui_public_repos/TTS/TTS/vocoder/layers/melgan.py
from torch import nn from torch.nn.utils.parametrizations import weight_norm from torch.nn.utils.parametrize import remove_parametrizations class ResidualStack(nn.Module): def __init__(self, channels, num_res_blocks, kernel_size): super().__init__() assert (kernel_size - 1) % 2 == 0, " [!] kernel_size has to be odd." base_padding = (kernel_size - 1) // 2 self.blocks = nn.ModuleList() for idx in range(num_res_blocks): layer_kernel_size = kernel_size layer_dilation = layer_kernel_size**idx layer_padding = base_padding * layer_dilation self.blocks += [ nn.Sequential( nn.LeakyReLU(0.2), nn.ReflectionPad1d(layer_padding), weight_norm( nn.Conv1d(channels, channels, kernel_size=kernel_size, dilation=layer_dilation, bias=True) ), nn.LeakyReLU(0.2), weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)), ) ] self.shortcuts = nn.ModuleList( [weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)) for _ in range(num_res_blocks)] ) def forward(self, x): for block, shortcut in zip(self.blocks, self.shortcuts): x = shortcut(x) + block(x) return x def remove_weight_norm(self): for block, shortcut in zip(self.blocks, self.shortcuts): remove_parametrizations(block[2], "weight") remove_parametrizations(block[4], "weight") remove_parametrizations(shortcut, "weight")
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/optimizer/graph_transformer_config.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <string>
#include <vector>
#include <type_traits>

namespace onnxruntime {
struct GraphTransformerConfiguration {
  struct PropagateCastOpsConfiguration {
    // Propagate FP16 Cast operations up and FP32 operations down
    /*
     * Cast propagation strategy.
     * One strategy is to insert casts around all the nodes with the allowed opcodes
     * and reduce, by removing redundant casts and back-to-back casts etc., and
     * the other is to propagate casts using a flood-fill approach, expanding float16 regions in the graph,
     * traversing the graph up/down.
     */
    enum class Strategy {
      None = 0,
      InsertAndReduce = 1,
      FloodFill = 2, /* Propagate FP16 Cast operations up and FP32 operations down */
      RemoveInputOutputUpDownCasts = 4 /* If all the floating-point inputs of a node are cast to FP32
                                          and all the floating-point outputs are cast to FP16,
                                          then remove all input and output casts. */
    };
    using Strategy_t = std::underlying_type<Strategy>::type;
    friend constexpr Strategy operator|(const Strategy s1, const Strategy s2) {
      return static_cast<Strategy>(static_cast<Strategy_t>(s1) | static_cast<Strategy_t>(s2));
    }
    friend Strategy& operator|=(Strategy& s1, Strategy s2) {
      s1 = s1 | s2;
      return s1;
    }
    friend constexpr Strategy operator&(const Strategy s1, const Strategy s2) {
      return static_cast<Strategy>(static_cast<Strategy_t>(s1) & static_cast<Strategy_t>(s2));
    }
    friend constexpr Strategy& operator&=(Strategy& s1, Strategy s2) {
      s1 = s1 & s2;
      return s1;
    }
    friend constexpr bool operator==(Strategy s1, Strategy s2) {
      return static_cast<Strategy_t>(s1) == static_cast<Strategy_t>(s2);
    }
    friend constexpr bool operator!=(Strategy s1, Strategy s2) {
      return (s1 == s2) == false;
    }
    int level{-1}; /* -1 => no cast propagation,
                       0 => use user specified list of opcodes to allow moving cast operations,
                       1 => use ORT predefined list of level 1 opcodes in addition to the user specified allow opcodes
                       2 => use ORT predefined list of level 2 opcodes in addition to the user specified allow opcodes */
    Strategy strategy = Strategy::None;
    // List of allowed opcodes to consider as safe to execute in float16, while moving cast operations
    std::vector<std::string> allow;
  };

  PropagateCastOpsConfiguration propagate_cast_ops_config;
};

// The following declarations are required to refer to these operators in pybind11.
constexpr GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy
operator|(GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy,
          GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy);
constexpr GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy
operator&(GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy,
          GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy);
constexpr bool operator==(GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy,
                          GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy);
constexpr bool operator!=(GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy,
                          GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy);
}  // namespace onnxruntime
0
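Because the header above is pure configuration, a short sketch of how a caller might populate it can make the flag-style Strategy enum and the level/allow fields more concrete. The opcode names in the allow list and the function name are hypothetical examples, not values required by ONNX Runtime.

// Sketch only: combines two strategies and fills in the other fields declared above.
#include "core/optimizer/graph_transformer_config.h"

namespace onnxruntime {

GraphTransformerConfiguration MakeExamplePropagateCastConfig() {
  GraphTransformerConfiguration config;
  using Strategy = GraphTransformerConfiguration::PropagateCastOpsConfiguration::Strategy;

  // Combine strategies through the overloaded operator| defined in the header.
  config.propagate_cast_ops_config.strategy =
      Strategy::FloodFill | Strategy::RemoveInputOutputUpDownCasts;

  // Level 1: ORT's predefined level-1 opcodes plus the user-specified allow list.
  config.propagate_cast_ops_config.level = 1;

  // Hypothetical opcodes considered safe to execute in float16.
  config.propagate_cast_ops_config.allow = {"Add", "MatMul", "Relu"};

  return config;
}

}  // namespace onnxruntime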
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/optimizer/graph_transformer_utils.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <gsl/gsl> #include "core/optimizer/graph_transformer.h" #include "core/optimizer/rule_based_graph_transformer.h" #include "core/optimizer/rewrite_rule.h" namespace onnxruntime { struct FreeDimensionOverride; class IExecutionProvider; namespace optimizer_utils { /** Generates all predefined rules for this level. If rules_to_enable is not empty, it returns the intersection of predefined rules and rules_to_enable. TODO: This is visible for testing at the moment, but we should rather make it private. */ std::vector<std::unique_ptr<RewriteRule>> GenerateRewriteRules( TransformerLevel level, const std::unordered_set<std::string>& rules_to_disable = {}); /** Given a TransformerLevel, this method generates a name for the rule-based graph transformer of that level. */ std::string GenerateRuleBasedTransformerName(TransformerLevel level); /** Generates all rule-based transformers for this level. */ std::unique_ptr<RuleBasedGraphTransformer> GenerateRuleBasedGraphTransformer( TransformerLevel level, const std::unordered_set<std::string>& rules_to_disable, const std::unordered_set<std::string>& compatible_execution_providers); /** Generates all predefined (both rule-based and non-rule-based) transformers for this level. Any transformers or rewrite rules named in rules_and_transformers_to_disable will be excluded. */ std::vector<std::unique_ptr<GraphTransformer>> GenerateTransformers( TransformerLevel level, const SessionOptions& session_options, const IExecutionProvider& execution_provider /*required by constant folding*/, const std::unordered_set<std::string>& rules_and_transformers_to_disable = {}); } // namespace optimizer_utils } // namespace onnxruntime
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstrmepsilon-main.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Removes epsilons from an FST. #include <cstring> #include <memory> #include <string> #include <fst/flags.h> #include <fst/log.h> #include <fst/script/getters.h> #include <fst/script/rmepsilon.h> DECLARE_bool(connect); DECLARE_double(delta); DECLARE_int64(nstate); DECLARE_string(queue_type); DECLARE_string(weight); int fstrmepsilon_main(int argc, char **argv) { namespace s = fst::script; using fst::script::MutableFstClass; using fst::script::WeightClass; string usage = "Removes epsilons from an FST.\n\n Usage: "; usage += argv[0]; usage += " [in.fst [out.fst]]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 3) { ShowUsage(); return 1; } const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : ""; const string out_name = argc > 2 ? argv[2] : ""; std::unique_ptr<MutableFstClass> fst(MutableFstClass::Read(in_name, true)); if (!fst) return 1; const auto weight_threshold = FLAGS_weight.empty() ? WeightClass::Zero(fst->WeightType()) : WeightClass(fst->WeightType(), FLAGS_weight); fst::QueueType queue_type; if (!s::GetQueueType(FLAGS_queue_type, &queue_type)) { LOG(ERROR) << argv[0] << ": Unknown or unsupported queue type: " << FLAGS_queue_type; return 1; } const s::RmEpsilonOptions opts(queue_type, FLAGS_connect, weight_threshold, FLAGS_nstate, FLAGS_delta); s::RmEpsilon(fst.get(), opts); return !fst->Write(out_name); }
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/linear/loglinear-apply.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_EXTENSIONS_LINEAR_LOGLINEAR_APPLY_H_ #define FST_EXTENSIONS_LINEAR_LOGLINEAR_APPLY_H_ #include <fst/compat.h> #include <fst/arc.h> #include <fst/arc-map.h> #include <fst/compose.h> #include <fst/determinize.h> #include <fst/float-weight.h> #include <fst/fst.h> #include <fst/minimize.h> #include <fst/mutable-fst.h> #include <fst/project.h> #include <fst/rmepsilon.h> #include <fst/vector-fst.h> namespace fst { // Applies a FST model as a discriminative model to weighted input // `ifst`. `A` is an arc type with tropical weight of all the // input/output FSTs. // // In general, consider `ifst` an unnormalized probability // distribution between its input X and output Y, P(X, Y); and `lfst` // a group of unnormalized probability distributions of all its output // Z for every input Y, Q(Z|Y). `normalize` controls whether Q is // normalized for every Y before chaining with P(X, Y). I.e., for a // path (X, Y, Z) in `ofst` (where Y is hidden), // // - When `normalize` is true, its weight is P(X, Y) Q(Z|Y) / sum_z Q(z|Y); // - When `normalize` is false, its weight is P(X, Y) Q(Z|Y). template <class A> void LogLinearApply(const Fst<A> &ifst, const Fst<A> &lfst, MutableFst<A> *ofst, bool normalize = true) { LogLinearApply<A, LogArc>(ifst, lfst, ofst, normalize); } // This version gives finer control over the arc type (`B`) to be used // in normalization. `B` is an arc type with log weight (e.g. `LogArc` // or `Log64Arc`). template <class A, class B> void LogLinearApply(const Fst<A> &ifst, const Fst<A> &lfst, MutableFst<A> *ofst, bool normalize = true) { if (normalize) { VectorFst<A> unnormalized_ofst, rescored_ifsa; Compose(ifst, lfst, &unnormalized_ofst); { VectorFst<A> tropical_ifsa(unnormalized_ofst); Project(&tropical_ifsa, PROJECT_INPUT); { VectorFst<B> minimal_log_ifsa; { VectorFst<B> log_ifsa; ArcMap(tropical_ifsa, &log_ifsa, WeightConvertMapper<A, B>()); RmEpsilon(&log_ifsa); Determinize(log_ifsa, &minimal_log_ifsa); } Minimize(&minimal_log_ifsa); ArcMap(&minimal_log_ifsa, InvertWeightMapper<B>()); ArcMap(minimal_log_ifsa, &tropical_ifsa, WeightConvertMapper<B, A>()); } ArcSort(&tropical_ifsa, OLabelCompare<A>()); Compose(tropical_ifsa, ifst, &rescored_ifsa); } ArcSort(&rescored_ifsa, OLabelCompare<A>()); Compose(rescored_ifsa, unnormalized_ofst, ofst); } else { Compose(ifst, lfst, ofst); } } } // namespace fst #endif // FST_EXTENSIONS_LINEAR_LOGLINEAR_APPLY_H_
0
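LogLinearApply is only described in comments, so a hedged, compile-level sketch of a call site may help; the labels, weights, and two-arc topology below are invented purely for illustration, and lfst is arc-sorted on input labels so that the composition inside LogLinearApply is well defined.

// Sketch only: applies a tiny log-linear model with per-output normalization.
#include <fst/fstlib.h>
#include <fst/extensions/linear/loglinear-apply.h>

int main() {
  using fst::StdArc;
  using fst::StdVectorFst;
  using Weight = StdArc::Weight;

  // ifst: a single path mapping input label 1 to output label 2 with weight 0.5.
  StdVectorFst ifst;
  ifst.AddState();
  ifst.AddState();
  ifst.SetStart(0);
  ifst.SetFinal(1, Weight::One());
  ifst.AddArc(0, StdArc(1, 2, Weight(0.5), 1));

  // lfst: scores label 2 against two candidate outputs, 3 and 4.
  StdVectorFst lfst;
  lfst.AddState();
  lfst.AddState();
  lfst.SetStart(0);
  lfst.SetFinal(1, Weight::One());
  lfst.AddArc(0, StdArc(2, 3, Weight(1.0), 1));
  lfst.AddArc(0, StdArc(2, 4, Weight(2.0), 1));
  fst::ArcSort(&lfst, fst::ILabelCompare<StdArc>());

  // ofst weights follow the normalized formula described in the header comment.
  StdVectorFst ofst;
  fst::LogLinearApply(ifst, lfst, &ofst, /*normalize=*/true);
  return 0;
}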
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/script/prune.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/script/prune.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void Prune(const FstClass &ifst, MutableFstClass *ofst, const WeightClass &weight_threshold, int64 state_threshold, float delta) { if (!internal::ArcTypesMatch(ifst, *ofst, "Prune") || !ofst->WeightTypesMatch(weight_threshold, "Prune")) { ofst->SetProperties(kError, kError); return; } PruneArgs1 args(ifst, ofst, weight_threshold, state_threshold, delta); Apply<Operation<PruneArgs1>>("Prune", ifst.ArcType(), &args); } void Prune(MutableFstClass *fst, const WeightClass &weight_threshold, int64 state_threshold, float delta) { if (!fst->WeightTypesMatch(weight_threshold, "Prune")) { fst->SetProperties(kError, kError); return; } PruneArgs2 args(fst, weight_threshold, state_threshold, delta); Apply<Operation<PruneArgs2>>("Prune", fst->ArcType(), &args); } REGISTER_FST_OPERATION(Prune, StdArc, PruneArgs1); REGISTER_FST_OPERATION(Prune, LogArc, PruneArgs1); REGISTER_FST_OPERATION(Prune, Log64Arc, PruneArgs1); REGISTER_FST_OPERATION(Prune, StdArc, PruneArgs2); REGISTER_FST_OPERATION(Prune, LogArc, PruneArgs2); REGISTER_FST_OPERATION(Prune, Log64Arc, PruneArgs2); } // namespace script } // namespace fst
0
coqui_public_repos/open-bible-scripts
coqui_public_repos/open-bible-scripts/data/akuapem-twi.txt
https://downloads.open.bible/text/ak/twkONA20/twkONA20_SFM.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_timingfiles.zip https://ebible.org/Scriptures/twi_readaloud.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_GEN_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_EXO_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_LEV_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_NUM_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_DEU_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JOS_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JDG_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_RUT_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1SA_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2SA_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1KI_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2KI_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1CH_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2CH_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_EZR_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_NEH_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_EST_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JOB_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_PSA_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_PRO_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_ECC_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_SNG_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_ISA_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JER_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_LAM_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_EZK_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_DAN_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_HOS_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JOL_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_AMO_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_OBA_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JON_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_MIC_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_NAM_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_HAB_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_ZEP_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_HAG_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_ZEC_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_MAL_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_MAT_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_MRK_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_LUK_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JHN_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_ACT_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_ROM_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1CO_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2CO_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_GAL_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_EPH_wav.zip 
https://downloads.open.bible/audio/ak/twkONA20/twkONA20_PHP_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_COL_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1TH_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2TH_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1TI_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2TI_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_TIT_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_PHM_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_HEB_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JAS_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1PE_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2PE_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_1JN_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_2JN_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_3JN_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_JUD_wav.zip https://downloads.open.bible/audio/ak/twkONA20/twkONA20_REV_wav.zip
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/far/print-strings.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Outputs as strings the string FSTs in a finite-state archive. #ifndef FST_EXTENSIONS_FAR_PRINT_STRINGS_H_ #define FST_EXTENSIONS_FAR_PRINT_STRINGS_H_ #include <iomanip> #include <string> #include <vector> #include <fst/flags.h> #include <fst/extensions/far/far.h> #include <fstream> #include <fst/shortest-distance.h> #include <fst/string.h> DECLARE_string(far_field_separator); namespace fst { template <class Arc> void FarPrintStrings(const std::vector<string> &ifilenames, FarEntryType entry_type, FarTokenType far_token_type, const string &begin_key, const string &end_key, bool print_key, bool print_weight, const string &symbols_fname, bool initial_symbols, int32_t generate_filenames, const string &filename_prefix, const string &filename_suffix) { StringTokenType token_type; if (far_token_type == FTT_SYMBOL) { token_type = StringTokenType::SYMBOL; } else if (far_token_type == FTT_BYTE) { token_type = StringTokenType::BYTE; } else if (far_token_type == FTT_UTF8) { token_type = StringTokenType::UTF8; } else { FSTERROR() << "FarPrintStrings: Unknown token type"; return; } std::unique_ptr<const SymbolTable> syms; if (!symbols_fname.empty()) { // TODO(kbg): Allow negative flag? const SymbolTableTextOptions opts(true); syms.reset(SymbolTable::ReadText(symbols_fname, opts)); if (!syms) { LOG(ERROR) << "FarPrintStrings: Error reading symbol table " << symbols_fname; return; } } std::unique_ptr<FarReader<Arc>> far_reader(FarReader<Arc>::Open(ifilenames)); if (!far_reader) return; if (!begin_key.empty()) far_reader->Find(begin_key); string okey; int nrep = 0; for (int i = 1; !far_reader->Done(); far_reader->Next(), ++i) { const auto &key = far_reader->GetKey(); if (!end_key.empty() && end_key < key) break; if (okey == key) { ++nrep; } else { nrep = 0; } okey = key; const auto *fst = far_reader->GetFst(); if (i == 1 && initial_symbols && !syms && fst->InputSymbols()) syms.reset(fst->InputSymbols()->Copy()); string str; VLOG(2) << "Handling key: " << key; StringPrinter<Arc> string_printer(token_type, syms ? syms.get() : fst->InputSymbols()); string_printer(*fst, &str); if (entry_type == FET_LINE) { if (print_key) std::cout << key << FLAGS_far_field_separator[0]; std::cout << str; if (print_weight) std::cout << FLAGS_far_field_separator[0] << ShortestDistance(*fst); std::cout << std::endl; } else if (entry_type == FET_FILE) { std::stringstream sstrm; if (generate_filenames) { sstrm.fill('0'); sstrm << std::right << std::setw(generate_filenames) << i; } else { sstrm << key; if (nrep > 0) sstrm << "." << nrep; } string filename; filename = filename_prefix + sstrm.str() + filename_suffix; std::ofstream ostrm(filename); if (!ostrm) { LOG(ERROR) << "FarPrintStrings: Can't open file: " << filename; return; } ostrm << str; if (token_type == StringTokenType::SYMBOL) ostrm << "\n"; } } } } // namespace fst #endif // FST_EXTENSIONS_FAR_PRINT_STRINGS_H_
0
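FarPrintStrings takes a long positional parameter list, so a hedged call sketch may clarify which argument is which. The archive name strings.far is invented, and the sketch assumes linking against the FAR extension library where the declared FLAGS_far_field_separator flag is defined.

// Sketch only: prints every FST in an archive as one line per entry, keys included.
#include <string>
#include <vector>

#include <fst/extensions/far/far.h>
#include <fst/extensions/far/print-strings.h>

int main() {
  const std::vector<std::string> ifilenames = {"strings.far"};
  fst::FarPrintStrings<fst::StdArc>(
      ifilenames,
      fst::FET_LINE,                 // entry_type: one line per FST
      fst::FTT_BYTE,                 // far_token_type: byte strings
      /*begin_key=*/"", /*end_key=*/"",
      /*print_key=*/true, /*print_weight=*/false,
      /*symbols_fname=*/"", /*initial_symbols=*/true,
      /*generate_filenames=*/0,
      /*filename_prefix=*/"", /*filename_suffix=*/"");
  return 0;
}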
coqui_public_repos/STT
coqui_public_repos/STT/bin/import_cv2.py
#!/usr/bin/env python """ Broadly speaking, this script takes the audio downloaded from Common Voice for a certain language, in addition to the *.tsv files output by CorporaCreator, and the script formats the data and transcripts to be in a state usable by train.py Use "python3 import_cv2.py -h" for help """ import csv import os import subprocess import unicodedata from multiprocessing import Pool import progressbar import sox from coqui_stt_ctcdecoder import Alphabet from coqui_stt_training.util.downloader import SIMPLE_BAR from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, get_importers_parser, get_validate_label, print_import_report, ) FIELDNAMES = ["wav_filename", "wav_filesize", "transcript"] SAMPLE_RATE = 16000 CHANNELS = 1 MAX_SECS = 10 PARAMS = None FILTER_OBJ = None class LabelFilter: def __init__(self, normalize, alphabet, validate_fun): self.normalize = normalize self.alphabet = alphabet self.validate_fun = validate_fun def filter(self, label): if self.normalize: label = ( unicodedata.normalize("NFKD", label.strip()) .encode("ascii", "ignore") .decode("ascii", "ignore") ) label = self.validate_fun(label) if self.alphabet and label and not self.alphabet.CanEncode(label): label = None return label def init_worker(params): global FILTER_OBJ # pylint: disable=global-statement validate_label = get_validate_label(params) alphabet = Alphabet(params.filter_alphabet) if params.filter_alphabet else None FILTER_OBJ = LabelFilter(params.normalize, alphabet, validate_label) def one_sample(sample): """Take an audio file, and optionally convert it to 16kHz WAV""" mp3_filename = sample[0] if not os.path.splitext(mp3_filename.lower())[1] == ".mp3": mp3_filename += ".mp3" # Storing wav files next to the mp3 ones - just with a different suffix wav_filename = os.path.splitext(mp3_filename)[0] + ".wav" _maybe_convert_wav(mp3_filename, wav_filename) file_size = -1 frames = 0 if os.path.exists(wav_filename): file_size = os.path.getsize(wav_filename) frames = int( subprocess.check_output( ["soxi", "-s", wav_filename], stderr=subprocess.STDOUT ) ) label = FILTER_OBJ.filter(sample[1]) rows = [] counter = get_counter() if file_size == -1: # Excluding samples that failed upon conversion counter["failed"] += 1 elif label is None: # Excluding samples that failed on label validation counter["invalid_label"] += 1 elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)): # Excluding samples that are too short to fit the transcript counter["too_short"] += 1 elif frames / SAMPLE_RATE > MAX_SECS: # Excluding very long samples to keep a reasonable batch-size counter["too_long"] += 1 else: # This one is good - keep it for the target CSV rows.append((os.path.split(wav_filename)[-1], file_size, label, sample[2])) counter["imported_time"] += frames counter["all"] += 1 counter["total_time"] += frames return (counter, rows) def _maybe_convert_set( dataset, tsv_dir, audio_dir, filter_obj, space_after_every_character=None, rows=None, exclude=None, ): exclude_transcripts = set() exclude_speakers = set() if exclude is not None: for sample in exclude: exclude_transcripts.add(sample[2]) exclude_speakers.add(sample[3]) if rows is None: rows = [] input_tsv = os.path.join(os.path.abspath(tsv_dir), dataset + ".tsv") if not os.path.isfile(input_tsv): return rows print("Loading TSV file: ", input_tsv) # Get audiofile path and transcript for each sentence in tsv samples = [] with open(input_tsv, encoding="utf-8") as input_tsv_file: reader = csv.DictReader(input_tsv_file, delimiter="\t") for row 
in reader: samples.append( ( os.path.join(audio_dir, row["path"]), row["sentence"], row["client_id"], ) ) counter = get_counter() num_samples = len(samples) print("Importing mp3 files...") pool = Pool(initializer=init_worker, initargs=(PARAMS,)) bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR) for i, processed in enumerate( pool.imap_unordered(one_sample, samples), start=1 ): counter += processed[0] rows += processed[1] bar.update(i) bar.update(num_samples) pool.close() pool.join() imported_samples = get_imported_samples(counter) assert counter["all"] == num_samples assert len(rows) == imported_samples print_import_report(counter, SAMPLE_RATE, MAX_SECS) output_csv = os.path.join(os.path.abspath(audio_dir), dataset + ".csv") print("Saving new Coqui STT-formatted CSV file to: ", output_csv) with open(output_csv, "w", encoding="utf-8", newline="") as output_csv_file: print("Writing CSV file for train.py as: ", output_csv) writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES) writer.writeheader() bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR) for filename, file_size, transcript, speaker in bar(rows): if transcript in exclude_transcripts or speaker in exclude_speakers: continue if space_after_every_character: writer.writerow( { "wav_filename": filename, "wav_filesize": file_size, "transcript": " ".join(transcript), } ) else: writer.writerow( { "wav_filename": filename, "wav_filesize": file_size, "transcript": transcript, } ) return rows def _preprocess_data(tsv_dir, audio_dir, space_after_every_character=False): exclude = [] for dataset in ["test", "dev", "train", "validated", "other"]: set_samples = _maybe_convert_set( dataset, tsv_dir, audio_dir, space_after_every_character ) if dataset in ["test", "dev"]: exclude += set_samples if dataset == "validated": _maybe_convert_set( "train-all", tsv_dir, audio_dir, space_after_every_character, rows=set_samples, exclude=exclude, ) def _maybe_convert_wav(mp3_filename, wav_filename): if not os.path.exists(wav_filename): transformer = sox.Transformer() transformer.convert(samplerate=SAMPLE_RATE, n_channels=CHANNELS) try: transformer.build(mp3_filename, wav_filename) except sox.core.SoxError: pass def parse_args(): parser = get_importers_parser(description="Import CommonVoice v2.0 corpora") parser.add_argument("tsv_dir", help="Directory containing tsv files") parser.add_argument( "--audio_dir", help='Directory containing the audio clips - defaults to "<tsv_dir>/clips"', ) parser.add_argument( "--filter_alphabet", help="Exclude samples with characters not in provided alphabet", ) parser.add_argument( "--normalize", action="store_true", help="Converts diacritic characters to their base ones", ) parser.add_argument( "--space_after_every_character", action="store_true", help="To help transcript join by white space", ) return parser.parse_args() def main(): audio_dir = ( PARAMS.audio_dir if PARAMS.audio_dir else os.path.join(PARAMS.tsv_dir, "clips") ) _preprocess_data(PARAMS.tsv_dir, audio_dir, PARAMS.space_after_every_character) if __name__ == "__main__": PARAMS = parse_args() main()
0
coqui_public_repos/TTS/recipes/ljspeech
coqui_public_repos/TTS/recipes/ljspeech/hifigan/train_hifigan.py
import os from trainer import Trainer, TrainerArgs from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import HifiganConfig from TTS.vocoder.datasets.preprocess import load_wav_data from TTS.vocoder.models.gan import GAN output_path = os.path.dirname(os.path.abspath(__file__)) config = HifiganConfig( batch_size=32, eval_batch_size=16, num_loader_workers=4, num_eval_loader_workers=4, run_eval=True, test_delay_epochs=5, epochs=1000, seq_len=8192, pad_short=2000, use_noise_augment=True, eval_split_size=10, print_step=25, print_eval=False, mixed_precision=False, lr_gen=1e-4, lr_disc=1e-4, data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), output_path=output_path, ) # init audio processor ap = AudioProcessor(**config.audio.to_dict()) # load training samples eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) # init model model = GAN(config, ap) # init the trainer and 🚀 trainer = Trainer( TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples ) trainer.fit()
0
coqui_public_repos/STT/native_client/kenlm
coqui_public_repos/STT/native_client/kenlm/lm/test_nounk.arpa
\data\ ngram 1=36 ngram 2=45 ngram 3=10 ngram 4=6 ngram 5=4 \1-grams: -1.383514 , -0.30103 -1.139057 . -0.845098 -1.029493 </s> -99 <s> -0.4149733 -1.285941 a -0.69897 -1.687872 also -0.30103 -1.687872 beyond -0.30103 -1.687872 biarritz -0.30103 -1.687872 call -0.30103 -1.687872 concerns -0.30103 -1.687872 consider -0.30103 -1.687872 considering -0.30103 -1.687872 for -0.30103 -1.509559 higher -0.30103 -1.687872 however -0.30103 -1.687872 i -0.30103 -1.687872 immediate -0.30103 -1.687872 in -0.30103 -1.687872 is -0.30103 -1.285941 little -0.69897 -1.383514 loin -0.30103 -1.687872 look -0.30103 -1.285941 looking -0.4771212 -1.206319 more -0.544068 -1.509559 on -0.4771212 -1.509559 screening -0.4771212 -1.687872 small -0.30103 -1.687872 the -0.30103 -1.687872 to -0.30103 -1.687872 watch -0.30103 -1.687872 watching -0.30103 -1.687872 what -0.30103 -1.687872 would -0.30103 -3.141592 foo -2.718281 bar 3.0 -6.535897 baz -0.0 \2-grams: -0.6925742 , . -0.7522095 , however -0.7522095 , is -0.0602359 . </s> -0.4846522 <s> looking -0.4771214 -1.051485 <s> screening -1.07153 <s> the -1.07153 <s> watching -1.07153 <s> what -0.09132547 a little -0.69897 -0.2922095 also call -0.2922095 beyond immediate -0.2705918 biarritz . -0.2922095 call for -0.2922095 concerns in -0.2922095 consider watch -0.2922095 considering consider -0.2834328 for , -0.5511513 higher more -0.5845945 higher small -0.2834328 however , -0.2922095 i would -0.2922095 immediate concerns -0.2922095 in biarritz -0.2922095 is to -0.09021038 little more -0.1998621 -0.7273645 loin , -0.6925742 loin . -0.6708385 loin </s> -0.2922095 look beyond -0.4638903 looking higher -0.4638903 looking on -0.4771212 -0.5136299 more . -0.4771212 -0.3561665 more loin -0.1649931 on a -0.4771213 -0.1649931 screening a -0.4771213 -0.2705918 small . -0.287799 the screening -0.2922095 to look -0.2622373 watch </s> -0.2922095 watching considering -0.2922095 what i -0.2922095 would also -2 also would -6 -6 foo bar \3-grams: -0.01916512 more . </s> -0.0283603 on a little -0.4771212 -0.0283603 screening a little -0.4771212 -0.01660496 a little more -0.09409451 -0.3488368 <s> looking higher -0.3488368 <s> looking on -0.4771212 -0.1892331 little more loin -0.04835128 looking on a -0.4771212 -3 also would consider -7 -7 to look a \4-grams: -0.009249173 looking on a little -0.4771212 -0.005464747 on a little more -0.4771212 -0.005464747 screening a little more -0.1453306 a little more loin -0.01552657 <s> looking on a -0.4771212 -4 also would consider higher -8 \5-grams: -0.003061223 <s> looking on a little -0.001813953 looking on a little more -0.0432557 on a little more loin -5 also would consider higher looking \end\
0
coqui_public_repos/STT-examples/python_websocket_server/helm
coqui_public_repos/STT-examples/python_websocket_server/helm/stt_server/values.yaml
# Default values for stt-server. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: "<docker_repo_path>" pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: "" imagePullSecrets: [] nameOverride: "" fullnameOverride: "" podAnnotations: {} podSecurityContext: {} # fsGroup: 2000 securityContext: {} # capabilities: # drop: # - ALL # readOnlyRootFilesystem: true # runAsNonRoot: true # runAsUser: 1000 service: type: ClusterIP port: 8080 ingress: enabled: false annotations: {} hosts: [] tls: [] resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi autoscaling: enabled: false minReplicas: 1 maxReplicas: 100 targetCPUUtilizationPercentage: 80 nodeSelector: {} tolerations: [] affinity: {}
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/vc_tests/test_freevc.py
import os import unittest import torch from tests import get_tests_input_path from TTS.vc.configs.freevc_config import FreeVCConfig from TTS.vc.models.freevc import FreeVC # pylint: disable=unused-variable # pylint: disable=no-self-use torch.manual_seed(1) use_cuda = torch.cuda.is_available() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") c = FreeVCConfig() WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") BATCH_SIZE = 3 def count_parameters(model): r"""Count number of trainable parameters in a network""" return sum(p.numel() for p in model.parameters() if p.requires_grad) class TestFreeVC(unittest.TestCase): def _create_inputs(self, config, batch_size=2): input_dummy = torch.rand(batch_size, 30 * config.audio["hop_length"]).to(device) input_lengths = torch.randint(100, 30 * config.audio["hop_length"], (batch_size,)).long().to(device) input_lengths[-1] = 30 * config.audio["hop_length"] spec = torch.rand(batch_size, 30, config.audio["filter_length"] // 2 + 1).to(device) mel = torch.rand(batch_size, 30, config.audio["n_mel_channels"]).to(device) spec_lengths = torch.randint(20, 30, (batch_size,)).long().to(device) spec_lengths[-1] = spec.size(2) waveform = torch.rand(batch_size, spec.size(2) * config.audio["hop_length"]).to(device) return input_dummy, input_lengths, mel, spec, spec_lengths, waveform @staticmethod def _create_inputs_inference(): source_wav = torch.rand(16000) target_wav = torch.rand(16000) return source_wav, target_wav @staticmethod def _check_parameter_changes(model, model_ref): count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref ) count += 1 def test_methods(self): config = FreeVCConfig() model = FreeVC(config).to(device) model.load_pretrained_speaker_encoder() model.init_multispeaker(config) wavlm_feats = model.extract_wavlm_features(torch.rand(1, 16000)) assert wavlm_feats.shape == (1, 1024, 49), wavlm_feats.shape def test_load_audio(self): config = FreeVCConfig() model = FreeVC(config).to(device) wav = model.load_audio(WAV_FILE) wav2 = model.load_audio(wav) assert all(torch.isclose(wav, wav2)) def _test_forward(self, batch_size): # create model config = FreeVCConfig() model = FreeVC(config).to(device) model.train() print(" > Num parameters for FreeVC model:%s" % (count_parameters(model))) _, _, mel, spec, spec_lengths, waveform = self._create_inputs(config, batch_size) wavlm_vec = model.extract_wavlm_features(waveform) wavlm_vec_lengths = torch.ones(batch_size, dtype=torch.long) y = model.forward(wavlm_vec, spec, None, mel, spec_lengths, wavlm_vec_lengths) # TODO: assert with training implementation def test_forward(self): self._test_forward(1) self._test_forward(3) def _test_inference(self, batch_size): config = FreeVCConfig() model = FreeVC(config).to(device) model.eval() _, _, mel, _, _, waveform = self._create_inputs(config, batch_size) wavlm_vec = model.extract_wavlm_features(waveform) wavlm_vec_lengths = torch.ones(batch_size, dtype=torch.long) output_wav = model.inference(wavlm_vec, None, mel, wavlm_vec_lengths) assert ( output_wav.shape[-1] // config.audio.hop_length == wavlm_vec.shape[-1] ), f"{output_wav.shape[-1] // config.audio.hop_length} != {wavlm_vec.shape}" def test_inference(self): self._test_inference(1) self._test_inference(3) def test_voice_conversion(self): config = FreeVCConfig() model = FreeVC(config).to(device) model.eval() source_wav, target_wav = 
self._create_inputs_inference() output_wav = model.voice_conversion(source_wav, target_wav) assert ( output_wav.shape[0] + config.audio.hop_length == source_wav.shape[0] ), f"{output_wav.shape} != {source_wav.shape}" def test_train_step(self): ... def test_train_eval_log(self): ... def test_test_run(self): ... def test_load_checkpoint(self): ... def test_get_criterion(self): ... def test_init_from_config(self): ...
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidjson
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidjson/internal/meta.h
// Tencent is pleased to support the open source community by making RapidJSON available. // // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. // // Licensed under the MIT License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://opensource.org/licenses/MIT // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef CEREAL_RAPIDJSON_INTERNAL_META_H_ #define CEREAL_RAPIDJSON_INTERNAL_META_H_ #include "../rapidjson.h" #ifdef __GNUC__ CEREAL_RAPIDJSON_DIAG_PUSH CEREAL_RAPIDJSON_DIAG_OFF(effc++) #endif #if defined(_MSC_VER) && !defined(__clang__) CEREAL_RAPIDJSON_DIAG_PUSH CEREAL_RAPIDJSON_DIAG_OFF(6334) #endif #if CEREAL_RAPIDJSON_HAS_CXX11_TYPETRAITS #include <type_traits> #endif //@cond CEREAL_RAPIDJSON_INTERNAL CEREAL_RAPIDJSON_NAMESPACE_BEGIN namespace internal { // Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching template <typename T> struct Void { typedef void Type; }; /////////////////////////////////////////////////////////////////////////////// // BoolType, TrueType, FalseType // template <bool Cond> struct BoolType { static const bool Value = Cond; typedef BoolType Type; }; typedef BoolType<true> TrueType; typedef BoolType<false> FalseType; /////////////////////////////////////////////////////////////////////////////// // SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr // template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; }; template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; }; template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {}; template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {}; template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {}; template <> struct AndExprCond<true, true> : TrueType {}; template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {}; template <> struct OrExprCond<false, false> : FalseType {}; template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {}; template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {}; template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {}; template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {}; /////////////////////////////////////////////////////////////////////////////// // AddConst, MaybeAddConst, RemoveConst template <typename T> struct AddConst { typedef const T Type; }; template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {}; template <typename T> struct RemoveConst { typedef T Type; }; template <typename T> struct RemoveConst<const T> { typedef T Type; }; /////////////////////////////////////////////////////////////////////////////// // IsSame, IsConst, IsMoreConst, IsPointer // template <typename T, typename U> struct IsSame : FalseType {}; template <typename T> struct IsSame<T, T> : TrueType {}; template <typename T> struct IsConst : FalseType {}; template <typename T> struct IsConst<const T> : TrueType {}; 
template <typename CT, typename T> struct IsMoreConst : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>, BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {}; template <typename T> struct IsPointer : FalseType {}; template <typename T> struct IsPointer<T*> : TrueType {}; /////////////////////////////////////////////////////////////////////////////// // IsBaseOf // #if CEREAL_RAPIDJSON_HAS_CXX11_TYPETRAITS template <typename B, typename D> struct IsBaseOf : BoolType< ::std::is_base_of<B,D>::value> {}; #else // simplified version adopted from Boost template<typename B, typename D> struct IsBaseOfImpl { CEREAL_RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0); CEREAL_RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0); typedef char (&Yes)[1]; typedef char (&No) [2]; template <typename T> static Yes Check(const D*, T); static No Check(const B*, int); struct Host { operator const B*() const; operator const D*(); }; enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) }; }; template <typename B, typename D> struct IsBaseOf : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {}; #endif // CEREAL_RAPIDJSON_HAS_CXX11_TYPETRAITS ////////////////////////////////////////////////////////////////////////// // EnableIf / DisableIf // template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; }; template <typename T> struct EnableIfCond<false, T> { /* empty */ }; template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; }; template <typename T> struct DisableIfCond<true, T> { /* empty */ }; template <typename Condition, typename T = void> struct EnableIf : EnableIfCond<Condition::Value, T> {}; template <typename Condition, typename T = void> struct DisableIf : DisableIfCond<Condition::Value, T> {}; // SFINAE helpers struct SfinaeTag {}; template <typename T> struct RemoveSfinaeTag; template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; }; #define CEREAL_RAPIDJSON_REMOVEFPTR_(type) \ typename ::CEREAL_RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \ < ::CEREAL_RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type #define CEREAL_RAPIDJSON_ENABLEIF(cond) \ typename ::CEREAL_RAPIDJSON_NAMESPACE::internal::EnableIf \ <CEREAL_RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL #define CEREAL_RAPIDJSON_DISABLEIF(cond) \ typename ::CEREAL_RAPIDJSON_NAMESPACE::internal::DisableIf \ <CEREAL_RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL #define CEREAL_RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \ typename ::CEREAL_RAPIDJSON_NAMESPACE::internal::EnableIf \ <CEREAL_RAPIDJSON_REMOVEFPTR_(cond), \ CEREAL_RAPIDJSON_REMOVEFPTR_(returntype)>::Type #define CEREAL_RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \ typename ::CEREAL_RAPIDJSON_NAMESPACE::internal::DisableIf \ <CEREAL_RAPIDJSON_REMOVEFPTR_(cond), \ CEREAL_RAPIDJSON_REMOVEFPTR_(returntype)>::Type } // namespace internal CEREAL_RAPIDJSON_NAMESPACE_END //@endcond #if defined(_MSC_VER) && !defined(__clang__) CEREAL_RAPIDJSON_DIAG_POP #endif #ifdef __GNUC__ CEREAL_RAPIDJSON_DIAG_POP #endif #endif // CEREAL_RAPIDJSON_INTERNAL_META_H_
0
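These helpers are header-only metaprogramming traits, so a few compile-time checks may make their behavior concrete. The include path mirrors the file location above and assumes the cereal tree is on the include path; the alias name rj is arbitrary.

// Sketch only: exercises a few of the traits defined above at compile time.
#include <cereal/external/rapidjson/internal/meta.h>

namespace rj = CEREAL_RAPIDJSON_NAMESPACE;  // expands to the configured namespace

// SelectIf picks the first type when the condition is TrueType.
static_assert(rj::internal::IsSame<
                  rj::internal::SelectIf<rj::internal::TrueType, int, double>::Type,
                  int>::Value,
              "SelectIf<TrueType, int, double> should yield int");

// IsPointer and IsConst behave like their std counterparts.
static_assert(rj::internal::IsPointer<char *>::Value, "char* is a pointer");
static_assert(!rj::internal::IsConst<int>::Value, "int is not const");

// EnableIfCond only exposes Type when the condition holds, which is what the
// CEREAL_RAPIDJSON_ENABLEIF macros rely on for SFINAE.
static_assert(rj::internal::IsSame<rj::internal::EnableIfCond<true, long>::Type, long>::Value,
              "EnableIfCond<true, long>::Type should be long");

int main() { return 0; }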
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstproject.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/flags.h> DEFINE_bool(project_output, false, "Project on output (vs. input)"); int fstproject_main(int argc, char **argv); int main(int argc, char **argv) { return fstproject_main(argc, argv); }
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/pdt/reverse.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Reverses a pushdown transducer (PDT) encoded as an FST.

#ifndef FST_EXTENSIONS_PDT_REVERSE_H_
#define FST_EXTENSIONS_PDT_REVERSE_H_

#include <vector>

#include <fst/mutable-fst.h>
#include <fst/relabel.h>
#include <fst/reverse.h>

namespace fst {

// Reverses a pushdown transducer (PDT) encoded as an FST.
template <class Arc, class RevArc>
void Reverse(const Fst<Arc> &ifst,
             const std::vector<
                 std::pair<typename Arc::Label, typename Arc::Label>> &parens,
             MutableFst<RevArc> *ofst) {
  using Label = typename Arc::Label;
  // Reverses FST component.
  Reverse(ifst, ofst);
  // Exchanges open and close parenthesis pairs.
  std::vector<std::pair<Label, Label>> relabel_pairs;
  relabel_pairs.reserve(2 * parens.size());
  for (const auto &pair : parens) {
    relabel_pairs.emplace_back(pair.first, pair.second);
    relabel_pairs.emplace_back(pair.second, pair.first);
  }
  Relabel(ofst, relabel_pairs, relabel_pairs);
}

}  // namespace fst

#endif  // FST_EXTENSIONS_PDT_REVERSE_H_
0
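Since the header above only provides the template, a hedged usage sketch may help show how the parentheses vector is passed; the labels, the single paren pair (1, 2), and the three-arc topology are invented for illustration.

// Sketch only: reverses a tiny PDT whose parentheses are encoded as labels 1 and 2.
#include <utility>
#include <vector>

#include <fst/fstlib.h>
#include <fst/extensions/pdt/reverse.h>

int main() {
  using fst::StdArc;
  using fst::StdVectorFst;
  using Weight = StdArc::Weight;
  using Label = StdArc::Label;

  // A single path: open paren (1), an ordinary symbol (3), close paren (2).
  StdVectorFst pdt;
  for (int i = 0; i < 4; ++i) pdt.AddState();
  pdt.SetStart(0);
  pdt.SetFinal(3, Weight::One());
  pdt.AddArc(0, StdArc(1, 1, Weight::One(), 1));  // open parenthesis
  pdt.AddArc(1, StdArc(3, 3, Weight::One(), 2));  // regular arc
  pdt.AddArc(2, StdArc(2, 2, Weight::One(), 3));  // close parenthesis

  // One open/close parenthesis pair.
  const std::vector<std::pair<Label, Label>> parens = {{1, 2}};

  // Reverses the FST component and swaps open/close parenthesis labels.
  StdVectorFst reversed;
  fst::Reverse(pdt, parens, &reversed);
  return 0;
}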