coqui_public_repos/TTS/TTS/encoder/utils/prepare_voxceleb.py
# coding=utf-8
# Copyright (C) 2020 ATHENA AUTHORS; Yiping Peng; Ne Luo
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode and TF>=2.0.0
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" voxceleb 1 & 2 """

import hashlib
import os
import subprocess
import sys
import zipfile

import pandas
import soundfile as sf
from absl import logging

SUBSETS = {
    "vox1_dev_wav": [
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partaa",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partab",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partac",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partad",
    ],
    "vox1_test_wav": ["https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_test_wav.zip"],
    "vox2_dev_aac": [
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partaa",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partab",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partac",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partad",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partae",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partaf",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partag",
        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partah",
    ],
    "vox2_test_aac": ["https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_test_aac.zip"],
}

MD5SUM = {
    "vox1_dev_wav": "ae63e55b951748cc486645f532ba230b",
    "vox2_dev_aac": "bbc063c46078a602ca71605645c2a402",
    "vox1_test_wav": "185fdc63c3c739954633d50379a3d102",
    "vox2_test_aac": "0d2b3ea430a821c33263b5ea37ede312",
}

USER = {"user": "", "password": ""}

speaker_id_dict = {}


def download_and_extract(directory, subset, urls):
    """Download and extract the given split of dataset.

    Args:
        directory: the directory where to put the downloaded data.
        subset: subset name of the corpus.
        urls: the list of urls to download the data file.
    """
    os.makedirs(directory, exist_ok=True)

    try:
        for url in urls:
            zip_filepath = os.path.join(directory, url.split("/")[-1])
            if os.path.exists(zip_filepath):
                continue
            logging.info("Downloading %s to %s" % (url, zip_filepath))
            subprocess.call(
                "wget %s --user %s --password %s -O %s" % (url, USER["user"], USER["password"], zip_filepath),
                shell=True,
            )

            statinfo = os.stat(zip_filepath)
            logging.info("Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size))

        # concatenate all parts into zip files
        if ".zip" not in zip_filepath:
            zip_filepath = "_".join(zip_filepath.split("_")[:-1])
            subprocess.call("cat %s* > %s.zip" % (zip_filepath, zip_filepath), shell=True)
            zip_filepath += ".zip"
        # NOTE: the original used zip_filepath.strip(".zip"), which strips any
        # of the characters '.', 'z', 'i', 'p' from both ends of the string;
        # slicing off the suffix is what was intended.
        extract_path = zip_filepath[: -len(".zip")]

        # check zip file md5sum
        with open(zip_filepath, "rb") as f_zip:
            md5 = hashlib.md5(f_zip.read()).hexdigest()
        if md5 != MD5SUM[subset]:
            raise ValueError("md5sum of %s mismatch" % zip_filepath)

        with zipfile.ZipFile(zip_filepath, "r") as zfile:
            zfile.extractall(directory)
            extract_path_ori = os.path.join(directory, zfile.infolist()[0].filename)
        subprocess.call("mv %s %s" % (extract_path_ori, extract_path), shell=True)
    finally:
        # os.remove(zip_filepath)
        pass


def exec_cmd(cmd):
    """Run a command in a subprocess.

    Args:
        cmd: command line to be executed.
    Return:
        int, the return code.
    """
    try:
        retcode = subprocess.call(cmd, shell=True)
        if retcode < 0:
            logging.info(f"Child was terminated by signal {retcode}")
    except OSError as e:
        logging.info(f"Execution failed: {e}")
        retcode = -999
    return retcode


def decode_aac_with_ffmpeg(aac_file, wav_file):
    """Decode a given AAC file into WAV using ffmpeg.

    Args:
        aac_file: file path to input AAC file.
        wav_file: file path to output WAV file.
    Return:
        bool, True if success.
    """
    cmd = f"ffmpeg -i {aac_file} {wav_file}"
    logging.info(f"Decoding aac file using command line: {cmd}")
    ret = exec_cmd(cmd)
    if ret != 0:
        logging.error(f"Failed to decode aac file with retcode {ret}")
        logging.error("Please check your ffmpeg installation.")
        return False
    return True


def convert_audio_and_make_label(input_dir, subset, output_dir, output_file):
    """Optionally convert AAC to WAV and make speaker labels.

    Args:
        input_dir: the directory which holds the input dataset.
        subset: the name of the specified subset. e.g. vox1_dev_wav
        output_dir: the directory to place the newly generated csv files.
        output_file: the name of the newly generated csv file. e.g. vox1_dev_wav.csv
    """
    logging.info("Preprocessing audio and label for subset %s" % subset)
    source_dir = os.path.join(input_dir, subset)

    files = []
    # Convert all AAC file into WAV format. At the same time, generate the csv
    for root, _, filenames in os.walk(source_dir):
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext.lower() == ".wav":
                _, ext2 = os.path.splitext(name)
                if ext2:
                    continue
                wav_file = os.path.join(root, filename)
            elif ext.lower() == ".m4a":
                # Convert AAC to WAV.
                aac_file = os.path.join(root, filename)
                wav_file = aac_file + ".wav"
                if not os.path.exists(wav_file):
                    if not decode_aac_with_ffmpeg(aac_file, wav_file):
                        raise RuntimeError("Audio decoding failed.")
            else:
                continue
            speaker_name = root.split(os.path.sep)[-2]
            if speaker_name not in speaker_id_dict:
                num = len(speaker_id_dict)
                speaker_id_dict[speaker_name] = num
            # wav_filesize = os.path.getsize(wav_file)
            wav_length = len(sf.read(wav_file)[0])
            files.append((os.path.abspath(wav_file), wav_length, speaker_id_dict[speaker_name], speaker_name))

    # Write to CSV file which contains four columns:
    # "wav_filename", "wav_length_ms", "speaker_id", "speaker_name".
    csv_file_path = os.path.join(output_dir, output_file)
    df = pandas.DataFrame(data=files, columns=["wav_filename", "wav_length_ms", "speaker_id", "speaker_name"])
    df.to_csv(csv_file_path, index=False, sep="\t")
    logging.info("Successfully generated csv file {}".format(csv_file_path))


def processor(directory, subset, force_process):
    """download and process"""
    urls = SUBSETS
    if subset not in urls:
        raise ValueError(subset, "is not in voxceleb")

    subset_csv = os.path.join(directory, subset + ".csv")
    if not force_process and os.path.exists(subset_csv):
        return subset_csv

    logging.info("Downloading and process the voxceleb in %s", directory)
    logging.info("Preparing subset %s", subset)
    download_and_extract(directory, subset, urls[subset])
    convert_audio_and_make_label(directory, subset, directory, subset + ".csv")
    logging.info("Finished downloading and processing")
    return subset_csv


if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    if len(sys.argv) != 4:
        print("Usage: python prepare_data.py save_directory user password")
        sys.exit()

    DIR, USER["user"], USER["password"] = sys.argv[1], sys.argv[2], sys.argv[3]
    for SUBSET in SUBSETS:
        processor(DIR, SUBSET, False)

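The script above writes one tab-separated CSV manifest per subset with the columns wav_filename, wav_length_ms, speaker_id, and speaker_name. As a quick sanity check, the manifest can be loaded back with pandas; the sketch below is illustrative only, and the csv_path value is a hypothetical example of a directory/subset passed to processor():

import pandas

# Hypothetical output path; substitute whatever directory and subset were
# actually passed to processor().
csv_path = "data/vox1_test_wav.csv"

# The script writes with sep="\t", so read it back the same way.
df = pandas.read_csv(csv_path, sep="\t")

# Basic sanity checks on the generated manifest.
print(df.columns.tolist())  # ['wav_filename', 'wav_length_ms', 'speaker_id', 'speaker_name']
print("utterances:", len(df), "speakers:", df["speaker_name"].nunique())
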
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/extensions/python/Makefile.in
# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@

# Copyright (C) 1994-2013 Free Software Foundation, Inc.

# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.

@SET_MAKE@

# NB: we use the Cython-generated .cc files rather than the *.pxd/.pyx sources
# used to generate them. Consequently, modifications to the .pyx files will not
# influence the build unless the .cc files are regenerated using Cython.

VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
  case $${target_option-} in \
      ?) ;; \
      *) echo "am__make_running_with_option: internal error: invalid" \
              "target option '$${target_option-}' specified" >&2; \
         exit 1;; \
  esac; \
  has_opt=no; \
  sane_makeflags=$$MAKEFLAGS; \
  if $(am__is_gnu_make); then \
    sane_makeflags=$$MFLAGS; \
  else \
    case $$MAKEFLAGS in \
      *\\[\ \	]*) \
        bs=\\; \
        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
    esac; \
  fi; \
  skip_next=no; \
  strip_trailopt () \
  { \
    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
  }; \
  for flg in $$sane_makeflags; do \
    test $$skip_next = yes && { skip_next=no; continue; }; \
    case $$flg in \
      *=*|--*) continue;; \
        -*I) strip_trailopt 'I'; skip_next=yes;; \
      -*I?*) strip_trailopt 'I';; \
        -*O) strip_trailopt 'O'; skip_next=yes;; \
      -*O?*) strip_trailopt 'O';; \
        -*l) strip_trailopt 'l'; skip_next=yes;; \
      -*l?*) strip_trailopt 'l';; \
      -[dEDm]) skip_next=yes;; \
      -[JT]) skip_next=yes;; \
    esac; \
    case $$flg in \
      *$$target_option*) has_opt=yes; break;; \
    esac; \
  done; \
  test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = src/extensions/python
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
	$(top_srcdir)/depcomp
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \
	$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
	$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
	$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
	$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h \
	$(top_builddir)/src/include/fst/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
    *) f=$$p;; \
  esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
  for p in $$list; do echo "$$p $$p"; done | \
  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
    if (++n[$$2] == $(am__install_max)) \
      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
    END { for (dir in files) print dir, files[dir] }'
am__base_list = \
  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
  test -z "$$files" \
    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
         $(am__cd) "$$dir" && rm -f $$files; }; \
  }
am__installdirs = "$(DESTDIR)$(pythondir)"
LTLIBRARIES = $(python_LTLIBRARIES)
am__DEPENDENCIES_1 =
pywrapfst_la_DEPENDENCIES = ../far/libfstfarscript.la \
	../far/libfstfar.la ../../script/libfstscript.la \
	../../lib/libfst.la $(am__DEPENDENCIES_1)
am_pywrapfst_la_OBJECTS = pywrapfst_la-pywrapfst.lo
pywrapfst_la_OBJECTS = $(am_pywrapfst_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
pywrapfst_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
	$(CXXFLAGS) $(pywrapfst_la_LDFLAGS) $(LDFLAGS) -o $@
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo "  GEN     " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES =
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
	$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
	$(AM_CXXFLAGS) $(CXXFLAGS)
AM_V_CXX = $(am__v_CXX_@AM_V@)
am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
am__v_CXX_0 = @echo "  CXX     " $@;
am__v_CXX_1 =
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
am__v_CXXLD_0 = @echo "  CXXLD   " $@;
am__v_CXXLD_1 =
SOURCES = $(pywrapfst_la_SOURCES)
DIST_SOURCES = $(pywrapfst_la_SOURCES)
am__can_run_installinfo = \
  case $$AM_UPDATE_INFO_DIR in \
    n|no|NO) false;; \
    *) (install-info --version) >/dev/null 2>&1;; \
  esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates.  Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
  BEGIN { nonempty = 0; } \
  { items[$$0] = 1; nonempty = 1; } \
  END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique.  This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
  list='$(am__tagged_files)'; \
  unique=`for i in $$list; do \
    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
    done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DL_LIBS = @DL_LIBS@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PYTHON = @PYTHON@
PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@
PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@
PYTHON_LDFLAGS = @PYTHON_LDFLAGS@
PYTHON_PLATFORM = @PYTHON_PLATFORM@
PYTHON_PREFIX = @PYTHON_PREFIX@
PYTHON_SITE_PKG = @PYTHON_SITE_PKG@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libfstdir = @libfstdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgpyexecdir = @pkgpyexecdir@
pkgpythondir = @pkgpythondir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pyexecdir = @pyexecdir@
pythondir = @pythondir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
python_LTLIBRARIES = pywrapfst.la
pyexec_LTLIBRARIES = pywrapfst.la
pywrapfst_la_SOURCES = pywrapfst.cc
pywrapfst_la_CPPFLAGS = -I$(srcdir)/../../include $(PYTHON_CPPFLAGS)
pywrapfst_la_LDFLAGS = $(PYTHON_LDFLAGS) -avoid-version -module
pywrapfst_la_LIBADD = ../far/libfstfarscript.la ../far/libfstfar.la \
	../../script/libfstscript.la ../../lib/libfst.la \
	-lm $(DL_LIBS)

# Exports the *.pxd/*.pyx source files.
EXTRA_DIST = basictypes.pxd fst.pxd ios.pxd memory.pxd pywrapfst.pxd \
	pywrapfst.pyx
all: all-am

.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
	@for dep in $?; do \
	  case '$(am__configure_deps)' in \
	    *$$dep*) \
	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
	        && { if test -f $@; then exit 0; else break; fi; }; \
	      exit 1;; \
	  esac; \
	done; \
	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/python/Makefile'; \
	$(am__cd) $(top_srcdir) && \
	  $(AUTOMAKE) --foreign src/extensions/python/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
	@case '$?' in \
	  *config.status*) \
	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
	  *) \
	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
	esac;

$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh

$(top_srcdir)/configure: $(am__configure_deps)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):

install-pythonLTLIBRARIES: $(python_LTLIBRARIES)
	@$(NORMAL_INSTALL)
	@list='$(python_LTLIBRARIES)'; test -n "$(pythondir)" || list=; \
	list2=; for p in $$list; do \
	  if test -f $$p; then \
	    list2="$$list2 $$p"; \
	  else :; fi; \
	done; \
	test -z "$$list2" || { \
	  echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
	  $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pythondir)'"; \
	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pythondir)"; \
	}

uninstall-pythonLTLIBRARIES:
	@$(NORMAL_UNINSTALL)
	@list='$(python_LTLIBRARIES)'; test -n "$(pythondir)" || list=; \
	for p in $$list; do \
	  $(am__strip_dir) \
	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pythondir)/$$f'"; \
	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pythondir)/$$f"; \
	done

clean-pythonLTLIBRARIES:
	-test -z "$(python_LTLIBRARIES)" || rm -f $(python_LTLIBRARIES)
	@list='$(python_LTLIBRARIES)'; \
	locs=`for p in $$list; do echo $$p; done | \
	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
	      sort -u`; \
	test -z "$$locs" || { \
	  echo rm -f $${locs}; \
	  rm -f $${locs}; \
	}

pywrapfst.la: $(pywrapfst_la_OBJECTS) $(pywrapfst_la_DEPENDENCIES) $(EXTRA_pywrapfst_la_DEPENDENCIES)
	$(AM_V_CXXLD)$(pywrapfst_la_LINK) -rpath $(pythondir) $(pywrapfst_la_OBJECTS) $(pywrapfst_la_LIBADD) $(LIBS)

mostlyclean-compile:
	-rm -f *.$(OBJEXT)

distclean-compile:
	-rm -f *.tab.c

@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pywrapfst_la-pywrapfst.Plo@am__quote@

.cc.o:
@am__fastdepCXX_TRUE@	$(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@	$(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<

.cc.obj:
@am__fastdepCXX_TRUE@	$(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
@am__fastdepCXX_TRUE@	$(am__mv) $$depbase.Tpo $$depbase.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`

.cc.lo:
@am__fastdepCXX_TRUE@	$(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
@am__fastdepCXX_TRUE@	$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
@am__fastdepCXX_TRUE@	$(am__mv) $$depbase.Tpo $$depbase.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $<

pywrapfst_la-pywrapfst.lo: pywrapfst.cc
@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(pywrapfst_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT pywrapfst_la-pywrapfst.lo -MD -MP -MF $(DEPDIR)/pywrapfst_la-pywrapfst.Tpo -c -o pywrapfst_la-pywrapfst.lo `test -f 'pywrapfst.cc' || echo '$(srcdir)/'`pywrapfst.cc
@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/pywrapfst_la-pywrapfst.Tpo $(DEPDIR)/pywrapfst_la-pywrapfst.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='pywrapfst.cc' object='pywrapfst_la-pywrapfst.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(pywrapfst_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o pywrapfst_la-pywrapfst.lo `test -f 'pywrapfst.cc' || echo '$(srcdir)/'`pywrapfst.cc

mostlyclean-libtool:
	-rm -f *.lo

clean-libtool:
	-rm -rf .libs _libs

ID: $(am__tagged_files)
	$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags

tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
	set x; \
	here=`pwd`; \
	$(am__define_uniq_tagged_files); \
	shift; \
	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
	  test -n "$$unique" || unique=$$empty_fix; \
	  if test $$# -gt 0; then \
	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
	      "$$@" $$unique; \
	  else \
	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
	      $$unique; \
	  fi; \
	fi
ctags: ctags-am

CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
	$(am__define_uniq_tagged_files); \
	test -z "$(CTAGS_ARGS)$$unique" \
	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
	     $$unique

GTAGS:
	here=`$(am__cd) $(top_builddir) && pwd` \
	  && $(am__cd) $(top_srcdir) \
	  && gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am

cscopelist-am: $(am__tagged_files)
	list='$(am__tagged_files)'; \
	case "$(srcdir)" in \
	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
	  *) sdir=$(subdir)/$(srcdir) ;; \
	esac; \
	for i in $$list; do \
	  if test -f "$$i"; then \
	    echo "$(subdir)/$$i"; \
	  else \
	    echo "$$sdir/$$i"; \
	  fi; \
	done >> $(top_builddir)/cscope.files

distclean-tags:
	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags

distdir: $(DISTFILES)
	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
	list='$(DISTFILES)'; \
	  dist_files=`for file in $$list; do echo $$file; done | \
	  sed -e "s|^$$srcdirstrip/||;t" \
	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
	case $$dist_files in \
	  */*) $(MKDIR_P) `echo "$$dist_files" | \
			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
			   sort -u` ;; \
	esac; \
	for file in $$dist_files; do \
	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
	  if test -d $$d/$$file; then \
	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
	    if test -d "$(distdir)/$$file"; then \
	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
	    fi; \
	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
	    fi; \
	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
	  else \
	    test -f "$(distdir)/$$file" \
	    || cp -p $$d/$$file "$(distdir)/$$file" \
	    || exit 1; \
	  fi; \
	done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES)
installdirs:
	for dir in "$(DESTDIR)$(pythondir)"; do \
	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
	done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am

install-am: all-am
	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am

installcheck: installcheck-am
install-strip:
	if test -z '$(STRIP)'; then \
	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
	      install; \
	else \
	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
	fi
mostlyclean-generic:

clean-generic:

distclean-generic:
	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)

maintainer-clean-generic:
	@echo "This command is intended for maintainers to use"
	@echo "it deletes files that may require special tools to rebuild."
clean: clean-am

clean-am: clean-generic clean-libtool clean-pythonLTLIBRARIES \
	mostlyclean-am

distclean: distclean-am
	-rm -rf ./$(DEPDIR)
	-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
	distclean-tags

dvi: dvi-am

dvi-am:

html: html-am

html-am:

info: info-am

info-am:

install-data-am: install-pythonLTLIBRARIES

install-dvi: install-dvi-am

install-dvi-am:

install-exec-am:

install-html: install-html-am

install-html-am:

install-info: install-info-am

install-info-am:

install-man:

install-pdf: install-pdf-am

install-pdf-am:

install-ps: install-ps-am

install-ps-am:

installcheck-am:

maintainer-clean: maintainer-clean-am
	-rm -rf ./$(DEPDIR)
	-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic

mostlyclean: mostlyclean-am

mostlyclean-am: mostlyclean-compile mostlyclean-generic \
	mostlyclean-libtool

pdf: pdf-am

pdf-am:

ps: ps-am

ps-am:

uninstall-am: uninstall-pythonLTLIBRARIES

.MAKE: install-am install-strip

.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
	clean-libtool clean-pythonLTLIBRARIES cscopelist-am ctags \
	ctags-am distclean distclean-compile distclean-generic \
	distclean-libtool distclean-tags distdir dvi dvi-am html \
	html-am info info-am install install-am install-data \
	install-data-am install-dvi install-dvi-am install-exec \
	install-exec-am install-html install-html-am install-info \
	install-info-am install-man install-pdf install-pdf-am \
	install-ps install-ps-am install-pythonLTLIBRARIES \
	install-strip installcheck installcheck-am installdirs \
	maintainer-clean maintainer-clean-generic mostlyclean \
	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
	uninstall-pythonLTLIBRARIES


# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

coqui_public_repos/STT/taskcluster/tc-android-ds-tests.sh
#!/bin/bash

set -xe

arm_flavor=$1
api_level=$2

source $(dirname "$0")/tc-tests-utils.sh

bitrate=$3
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")

export DATA_TMP_DIR=${ANDROID_TMP_DIR}/ds

download_material "${TASKCLUSTER_TMP_DIR}/ds"

android_start_emulator "${arm_flavor}" "${api_level}"

android_setup_ndk_data

run_tflite_basic_inference_tests
run_android_hotword_tests

android_stop_emulator

coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/accumulator.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Classes to accumulate arc weights. Useful for weight lookahead.

#ifndef FST_ACCUMULATOR_H_
#define FST_ACCUMULATOR_H_

#include <algorithm>
#include <functional>
#include <unordered_map>
#include <vector>

#include <fst/log.h>

#include <fst/arcfilter.h>
#include <fst/arcsort.h>
#include <fst/dfs-visit.h>
#include <fst/expanded-fst.h>
#include <fst/replace.h>

namespace fst {

// This class accumulates arc weights using the semiring Plus().
template <class A>
class DefaultAccumulator {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  DefaultAccumulator() {}

  DefaultAccumulator(const DefaultAccumulator &acc, bool safe = false) {}

  void Init(const Fst<Arc> &fst, bool copy = false) {}

  void SetState(StateId state) {}

  Weight Sum(Weight w, Weight v) { return Plus(w, v); }

  template <class ArcIter>
  Weight Sum(Weight w, ArcIter *aiter, ssize_t begin, ssize_t end) {
    Adder<Weight> adder(w);  // maintains cumulative sum accurately
    aiter->Seek(begin);
    for (auto pos = begin; pos < end; aiter->Next(), ++pos)
      adder.Add(aiter->Value().weight);
    return adder.Sum();
  }

  constexpr bool Error() const { return false; }

 private:
  DefaultAccumulator &operator=(const DefaultAccumulator &) = delete;
};

// This class accumulates arc weights using the log semiring Plus() assuming an
// arc weight has a WeightConvert specialization to and from log64 weights.
template <class A>
class LogAccumulator {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  LogAccumulator() {}

  LogAccumulator(const LogAccumulator &acc, bool safe = false) {}

  void Init(const Fst<Arc> &fst, bool copy = false) {}

  void SetState(StateId s) {}

  Weight Sum(Weight w, Weight v) { return LogPlus(w, v); }

  template <class ArcIter>
  Weight Sum(Weight w, ArcIter *aiter, ssize_t begin, ssize_t end) {
    auto sum = w;
    aiter->Seek(begin);
    for (auto pos = begin; pos < end; aiter->Next(), ++pos) {
      sum = LogPlus(sum, aiter->Value().weight);
    }
    return sum;
  }

  constexpr bool Error() const { return false; }

 private:
  Weight LogPlus(Weight w, Weight v) {
    if (w == Weight::Zero()) {
      return v;
    }
    const auto f1 = to_log_weight_(w).Value();
    const auto f2 = to_log_weight_(v).Value();
    if (f1 > f2) {
      return to_weight_(Log64Weight(f2 - internal::LogPosExp(f1 - f2)));
    } else {
      return to_weight_(Log64Weight(f1 - internal::LogPosExp(f2 - f1)));
    }
  }

  WeightConvert<Weight, Log64Weight> to_log_weight_;
  WeightConvert<Log64Weight, Weight> to_weight_;

  LogAccumulator &operator=(const LogAccumulator &) = delete;
};

// Interface for shareable data for fast log accumulator copies. Holds pointers
// to data only, storage is provided by derived classes.
class FastLogAccumulatorData {
 public:
  FastLogAccumulatorData(int arc_limit, int arc_period)
      : arc_limit_(arc_limit),
        arc_period_(arc_period),
        weights_ptr_(nullptr),
        num_weights_(0),
        weight_positions_ptr_(nullptr),
        num_positions_(0) {}

  virtual ~FastLogAccumulatorData() {}

  // Cumulative weight per state for all states s.t. # of arcs > arc_limit_
  // with arcs in order. The first element per state is Log64Weight::Zero().
  const double *Weights() const { return weights_ptr_; }

  int NumWeights() const { return num_weights_; }

  // Maps from state to corresponding beginning weight position in weights_.
  // Position -1 means no pre-computed weights for that state.
  const int *WeightPositions() const { return weight_positions_ptr_; }

  int NumPositions() const { return num_positions_; }

  int ArcLimit() const { return arc_limit_; }

  int ArcPeriod() const { return arc_period_; }

  // Returns true if the data object is mutable and supports SetData().
  virtual bool IsMutable() const = 0;

  // Does not take ownership but may invalidate the contents of weights and
  // weight_positions.
  virtual void SetData(std::vector<double> *weights,
                       std::vector<int> *weight_positions) = 0;

 protected:
  void Init(int num_weights, const double *weights, int num_positions,
            const int *weight_positions) {
    weights_ptr_ = weights;
    num_weights_ = num_weights;
    weight_positions_ptr_ = weight_positions;
    num_positions_ = num_positions;
  }

 private:
  const int arc_limit_;
  const int arc_period_;
  const double *weights_ptr_;
  int num_weights_;
  const int *weight_positions_ptr_;
  int num_positions_;

  FastLogAccumulatorData(const FastLogAccumulatorData &) = delete;
  FastLogAccumulatorData &operator=(const FastLogAccumulatorData &) = delete;
};

// FastLogAccumulatorData with mutable storage; filled by
// FastLogAccumulator::Init.
class MutableFastLogAccumulatorData : public FastLogAccumulatorData {
 public:
  MutableFastLogAccumulatorData(int arc_limit, int arc_period)
      : FastLogAccumulatorData(arc_limit, arc_period) {}

  bool IsMutable() const override { return true; }

  void SetData(std::vector<double> *weights,
               std::vector<int> *weight_positions) override {
    weights_.swap(*weights);
    weight_positions_.swap(*weight_positions);
    Init(weights_.size(), weights_.data(), weight_positions_.size(),
         weight_positions_.data());
  }

 private:
  std::vector<double> weights_;
  std::vector<int> weight_positions_;

  MutableFastLogAccumulatorData(const MutableFastLogAccumulatorData &) = delete;
  MutableFastLogAccumulatorData &operator=(
      const MutableFastLogAccumulatorData &) = delete;
};

// This class accumulates arc weights using the log semiring Plus() assuming an
// arc weight has a WeightConvert specialization to and from log64 weights. The
// member function Init(fst) has to be called to setup pre-computed weight
// information.
template <class A>
class FastLogAccumulator {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  explicit FastLogAccumulator(ssize_t arc_limit = 20, ssize_t arc_period = 10)
      : to_log_weight_(),
        to_weight_(),
        arc_limit_(arc_limit),
        arc_period_(arc_period),
        data_(std::make_shared<MutableFastLogAccumulatorData>(arc_limit,
                                                              arc_period)),
        state_weights_(nullptr),
        error_(false) {}

  explicit FastLogAccumulator(std::shared_ptr<FastLogAccumulatorData> data)
      : to_log_weight_(),
        to_weight_(),
        arc_limit_(data->ArcLimit()),
        arc_period_(data->ArcPeriod()),
        data_(data),
        state_weights_(nullptr),
        error_(false) {}

  FastLogAccumulator(const FastLogAccumulator<Arc> &acc, bool safe = false)
      : to_log_weight_(),
        to_weight_(),
        arc_limit_(acc.arc_limit_),
        arc_period_(acc.arc_period_),
        data_(acc.data_),
        state_weights_(nullptr),
        error_(acc.error_) {}

  void SetState(StateId s) {
    const auto *weights = data_->Weights();
    const auto *weight_positions = data_->WeightPositions();
    state_weights_ = nullptr;
    if (s < data_->NumPositions()) {
      const auto pos = weight_positions[s];
      if (pos >= 0) state_weights_ = &(weights[pos]);
    }
  }

  Weight Sum(Weight w, Weight v) const { return LogPlus(w, v); }

  template <class ArcIter>
  Weight Sum(Weight w, ArcIter *aiter, ssize_t begin, ssize_t end) const {
    if (error_) return Weight::NoWeight();
    auto sum = w;
    // Finds begin and end of pre-stored weights.
    ssize_t index_begin = -1;
    ssize_t index_end = -1;
    ssize_t stored_begin = end;
    ssize_t stored_end = end;
    if (state_weights_) {
      index_begin = begin > 0 ? (begin - 1) / arc_period_ + 1 : 0;
      index_end = end / arc_period_;
      stored_begin = index_begin * arc_period_;
      stored_end = index_end * arc_period_;
    }
    // Computes sum before pre-stored weights.
    if (begin < stored_begin) {
      const auto pos_end = std::min(stored_begin, end);
      aiter->Seek(begin);
      for (auto pos = begin; pos < pos_end; aiter->Next(), ++pos) {
        sum = LogPlus(sum, aiter->Value().weight);
      }
    }
    // Computes sum between pre-stored weights.
    if (stored_begin < stored_end) {
      const auto f1 = state_weights_[index_end];
      const auto f2 = state_weights_[index_begin];
      if (f1 < f2) sum = LogPlus(sum, LogMinus(f1, f2));
      // Commented out for efficiency; adds Zero().
      /*
      else {
        // explicitly computes if cumulative sum lacks precision
        aiter->Seek(stored_begin);
        for (auto pos = stored_begin; pos < stored_end; aiter->Next(), ++pos)
          sum = LogPlus(sum, aiter->Value().weight);
      }
      */
    }
    // Computes sum after pre-stored weights.
    if (stored_end < end) {
      const auto pos_start = std::max(stored_begin, stored_end);
      aiter->Seek(pos_start);
      for (auto pos = pos_start; pos < end; aiter->Next(), ++pos) {
        sum = LogPlus(sum, aiter->Value().weight);
      }
    }
    return sum;
  }

  template <class FST>
  void Init(const FST &fst, bool copy = false) {
    if (copy || !data_->IsMutable()) return;
    if (data_->NumPositions() != 0 || arc_limit_ < arc_period_) {
      FSTERROR() << "FastLogAccumulator: Initialization error";
      error_ = true;
      return;
    }
    std::vector<double> weights;
    std::vector<int> weight_positions;
    weight_positions.reserve(CountStates(fst));
    for (StateIterator<FST> siter(fst); !siter.Done(); siter.Next()) {
      const auto s = siter.Value();
      if (fst.NumArcs(s) >= arc_limit_) {
        auto sum = FloatLimits<double>::PosInfinity();
        if (weight_positions.size() <= s) weight_positions.resize(s + 1, -1);
        weight_positions[s] = weights.size();
        weights.push_back(sum);
        size_t narcs = 0;
        ArcIterator<FST> aiter(fst, s);
        aiter.SetFlags(kArcWeightValue | kArcNoCache, kArcFlags);
        for (; !aiter.Done(); aiter.Next()) {
          const auto &arc = aiter.Value();
          sum = LogPlus(sum, arc.weight);
          // Stores cumulative weight distribution per arc_period_.
          if (++narcs % arc_period_ == 0) weights.push_back(sum);
        }
      }
    }
    data_->SetData(&weights, &weight_positions);
  }

  bool Error() const { return error_; }

  std::shared_ptr<FastLogAccumulatorData> GetData() const { return data_; }

 private:
  static double LogPosExp(double x) {
    return x == FloatLimits<double>::PosInfinity() ? 0.0
                                                   : log(1.0F + exp(-x));
  }

  static double LogMinusExp(double x) {
    return x == FloatLimits<double>::PosInfinity() ? 0.0
                                                   : log(1.0F - exp(-x));
  }

  Weight LogPlus(Weight w, Weight v) const {
    if (w == Weight::Zero()) {
      return v;
    }
    const auto f1 = to_log_weight_(w).Value();
    const auto f2 = to_log_weight_(v).Value();
    if (f1 > f2) {
      return to_weight_(Log64Weight(f2 - LogPosExp(f1 - f2)));
    } else {
      return to_weight_(Log64Weight(f1 - LogPosExp(f2 - f1)));
    }
  }

  double LogPlus(double f1, Weight v) const {
    const auto f2 = to_log_weight_(v).Value();
    if (f1 == FloatLimits<double>::PosInfinity()) {
      return f2;
    } else if (f1 > f2) {
      return f2 - LogPosExp(f1 - f2);
    } else {
      return f1 - LogPosExp(f2 - f1);
    }
  }

  // Assumes f1 < f2.
  Weight LogMinus(double f1, double f2) const {
    if (f2 == FloatLimits<double>::PosInfinity()) {
      return to_weight_(Log64Weight(f1));
    } else {
      return to_weight_(Log64Weight(f1 - LogMinusExp(f2 - f1)));
    }
  }

  const WeightConvert<Weight, Log64Weight> to_log_weight_;
  const WeightConvert<Log64Weight, Weight> to_weight_;
  const ssize_t arc_limit_;   // Minimum number of arcs to pre-compute state.
  const ssize_t arc_period_;  // Saves cumulative weights per arc_period_.
  std::shared_ptr<FastLogAccumulatorData> data_;
  const double *state_weights_;
  bool error_;

  FastLogAccumulator &operator=(const FastLogAccumulator &) = delete;
};

// Stores shareable data for cache log accumulator copies. All copies share the
// same cache.
template <class Arc>
class CacheLogAccumulatorData {
 public:
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  CacheLogAccumulatorData(bool gc, size_t gc_limit)
      : cache_gc_(gc), cache_limit_(gc_limit), cache_size_(0) {}

  CacheLogAccumulatorData(const CacheLogAccumulatorData<Arc> &data)
      : cache_gc_(data.cache_gc_),
        cache_limit_(data.cache_limit_),
        cache_size_(0) {}

  bool CacheDisabled() const { return cache_gc_ && cache_limit_ == 0; }

  std::vector<double> *GetWeights(StateId s) {
    auto it = cache_.find(s);
    if (it != cache_.end()) {
      it->second.recent = true;
      return it->second.weights.get();
    } else {
      return nullptr;
    }
  }

  void AddWeights(StateId s, std::vector<double> *weights) {
    if (cache_gc_ && cache_size_ >= cache_limit_) GC(false);
    cache_.insert(std::make_pair(s, CacheState(weights, true)));
    if (cache_gc_) cache_size_ += weights->capacity() * sizeof(double);
  }

 private:
  // Cached information for a given state.
  struct CacheState {
    std::unique_ptr<std::vector<double>> weights;  // Accumulated weights.
    bool recent;  // Has this state been accessed since last GC?

    CacheState(std::vector<double> *weights, bool recent)
        : weights(weights), recent(recent) {}
  };

  // Garbage collect: Deletes from cache states that have not been accessed
  // since the last GC ('free_recent = false') until 'cache_size_' is 2/3 of
  // 'cache_limit_'. If it does not free enough memory, start deleting
  // recently accessed states.
  void GC(bool free_recent) {
    auto cache_target = (2 * cache_limit_) / 3 + 1;
    auto it = cache_.begin();
    while (it != cache_.end() && cache_size_ > cache_target) {
      auto &cs = it->second;
      if (free_recent || !cs.recent) {
        cache_size_ -= cs.weights->capacity() * sizeof(double);
        cache_.erase(it++);
      } else {
        cs.recent = false;
        ++it;
      }
    }
    if (!free_recent && cache_size_ > cache_target) GC(true);
  }

  std::unordered_map<StateId, CacheState> cache_;  // Cache.
  bool cache_gc_;                                  // Enables garbage collection.
  size_t cache_limit_;                             // # of bytes allowed before GC.
  size_t cache_size_;                              // # of bytes cached.

  CacheLogAccumulatorData &operator=(const CacheLogAccumulatorData &) = delete;
};

// This class accumulates arc weights using the log semiring Plus(), assuming
// an arc weight has a WeightConvert specialization to and from log64 weights.
// It is similar to the FastLogAccumulator; however, here the accumulated
// weights are pre-computed and stored only for the states that are visited.
// The member function Init(fst) has to be called to setup this accumulator.
template <class Arc>
class CacheLogAccumulator {
 public:
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  explicit CacheLogAccumulator(ssize_t arc_limit = 10, bool gc = false,
                               size_t gc_limit = 10 * 1024 * 1024)
      : arc_limit_(arc_limit),
        data_(std::make_shared<CacheLogAccumulatorData<Arc>>(gc, gc_limit)),
        s_(kNoStateId),
        error_(false) {}

  CacheLogAccumulator(const CacheLogAccumulator<Arc> &acc, bool safe = false)
      : arc_limit_(acc.arc_limit_),
        fst_(acc.fst_ ? acc.fst_->Copy() : nullptr),
        data_(safe ? std::make_shared<CacheLogAccumulatorData<Arc>>(*acc.data_)
                   : acc.data_),
        s_(kNoStateId),
        error_(acc.error_) {}

  // Argument arc_limit specifies the minimum number of arcs to pre-compute.
  void Init(const Fst<Arc> &fst, bool copy = false) {
    if (!copy && fst_) {
      FSTERROR() << "CacheLogAccumulator: Initialization error";
      error_ = true;
      return;
    }
    fst_.reset(fst.Copy());
  }

  void SetState(StateId s, int depth = 0) {
    if (s == s_) return;
    s_ = s;
    if (data_->CacheDisabled() || error_) {
      weights_ = nullptr;
      return;
    }
    if (!fst_) {
      FSTERROR() << "CacheLogAccumulator::SetState: Incorrectly initialized";
      error_ = true;
      weights_ = nullptr;
      return;
    }
    weights_ = data_->GetWeights(s);
    if ((weights_ == nullptr) && (fst_->NumArcs(s) >= arc_limit_)) {
      weights_ = new std::vector<double>;
      weights_->reserve(fst_->NumArcs(s) + 1);
      weights_->push_back(FloatLimits<double>::PosInfinity());
      data_->AddWeights(s, weights_);
    }
  }

  Weight Sum(Weight w, Weight v) { return LogPlus(w, v); }

  template <class ArcIter>
  Weight Sum(Weight w, ArcIter *aiter, ssize_t begin, ssize_t end) {
    if (weights_ == nullptr) {
      auto sum = w;
      aiter->Seek(begin);
      for (auto pos = begin; pos < end; aiter->Next(), ++pos) {
        sum = LogPlus(sum, aiter->Value().weight);
      }
      return sum;
    } else {
      Extend(end, aiter);
      const auto &f1 = (*weights_)[end];
      const auto &f2 = (*weights_)[begin];
      if (f1 < f2) {
        return LogPlus(w, LogMinus(f1, f2));
      } else {
        // Commented out for efficiency; adds Zero().
        /*
        auto sum = w;
        // Explicitly computes if cumulative sum lacks precision.
        aiter->Seek(begin);
        for (auto pos = begin; pos < end; aiter->Next(), ++pos) {
          sum = LogPlus(sum, aiter->Value().weight);
        }
        return sum;
        */
        return w;
      }
    }
  }

  // Returns first position from aiter->Position() whose accumulated
  // value is greater or equal to w (w.r.t. Zero() < One()). The
  // iterator may be repositioned.
  template <class ArcIter>
  size_t LowerBound(Weight w, ArcIter *aiter) {
    const auto f = to_log_weight_(w).Value();
    auto pos = aiter->Position();
    if (weights_) {
      Extend(fst_->NumArcs(s_), aiter);
      return std::lower_bound(weights_->begin() + pos + 1, weights_->end(), f,
                              std::greater<double>()) -
             weights_->begin() - 1;
    } else {
      size_t n = 0;
      auto x = FloatLimits<double>::PosInfinity();
      for (aiter->Reset(); !aiter->Done(); aiter->Next(), ++n) {
        x = LogPlus(x, aiter->Value().weight);
        if (n >= pos && x <= f) break;
      }
      return n;
    }
  }

  bool Error() const { return error_; }

 private:
  double LogPosExp(double x) {
    return x == FloatLimits<double>::PosInfinity() ? 0.0
                                                   : log(1.0F + exp(-x));
  }

  double LogMinusExp(double x) {
    return x == FloatLimits<double>::PosInfinity() ? 0.0
                                                   : log(1.0F - exp(-x));
  }

  Weight LogPlus(Weight w, Weight v) {
    if (w == Weight::Zero()) {
      return v;
    }
    const auto f1 = to_log_weight_(w).Value();
    const auto f2 = to_log_weight_(v).Value();
    if (f1 > f2) {
      return to_weight_(Log64Weight(f2 - LogPosExp(f1 - f2)));
    } else {
      return to_weight_(Log64Weight(f1 - LogPosExp(f2 - f1)));
    }
  }

  double LogPlus(double f1, Weight v) {
    const auto f2 = to_log_weight_(v).Value();
    if (f1 == FloatLimits<double>::PosInfinity()) {
      return f2;
    } else if (f1 > f2) {
      return f2 - LogPosExp(f1 - f2);
    } else {
      return f1 - LogPosExp(f2 - f1);
    }
  }

  // Assumes f1 < f2.
  Weight LogMinus(double f1, double f2) {
    if (f2 == FloatLimits<double>::PosInfinity()) {
      return to_weight_(Log64Weight(f1));
    } else {
      return to_weight_(Log64Weight(f1 - LogMinusExp(f2 - f1)));
    }
  }

  // Extends weights up to index 'end'.
  template <class ArcIter>
  void Extend(ssize_t end, ArcIter *aiter) {
    if (weights_->size() <= end) {
      for (aiter->Seek(weights_->size() - 1); weights_->size() <= end;
           aiter->Next()) {
        weights_->push_back(LogPlus(weights_->back(), aiter->Value().weight));
      }
    }
  }

  WeightConvert<Weight, Log64Weight> to_log_weight_;
  WeightConvert<Log64Weight, Weight> to_weight_;
  ssize_t arc_limit_;             // Minimum # of arcs to cache a state.
  std::vector<double> *weights_;  // Accumulated weights for cur. state.
  std::unique_ptr<const Fst<Arc>> fst_;  // Input FST.
  std::shared_ptr<CacheLogAccumulatorData<Arc>> data_;  // Cache data.
  StateId s_;                     // Current state.
  bool error_;
};

// Stores shareable data for replace accumulator copies.
template <class Accumulator, class T>
class ReplaceAccumulatorData {
 public:
  using Arc = typename Accumulator::Arc;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using StateTable = T;
  using StateTuple = typename StateTable::StateTuple;

  ReplaceAccumulatorData() : state_table_(nullptr) {}

  explicit ReplaceAccumulatorData(
      const std::vector<Accumulator *> &accumulators)
      : state_table_(nullptr) {
    accumulators_.reserve(accumulators.size());
    for (const auto accumulator : accumulators) {
      accumulators_.emplace_back(accumulator);
    }
  }

  void Init(const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_tuples,
            const StateTable *state_table) {
    state_table_ = state_table;
    accumulators_.resize(fst_tuples.size());
    for (Label i = 0; i < accumulators_.size(); ++i) {
      if (!accumulators_[i]) {
        accumulators_[i].reset(new Accumulator());
        accumulators_[i]->Init(*(fst_tuples[i].second));
      }
      fst_array_.emplace_back(fst_tuples[i].second->Copy());
    }
  }

  const StateTuple &GetTuple(StateId s) const { return state_table_->Tuple(s); }

  Accumulator *GetAccumulator(size_t i) { return accumulators_[i].get(); }

  const Fst<Arc> *GetFst(size_t i) const { return fst_array_[i].get(); }

 private:
  const StateTable *state_table_;
  std::vector<std::unique_ptr<Accumulator>> accumulators_;
  std::vector<std::unique_ptr<const Fst<Arc>>> fst_array_;
};

// This class accumulates weights in a ReplaceFst. The 'Init' method takes as
// input the argument used to build the ReplaceFst and the ReplaceFst state
// table. It uses accumulators of type 'Accumulator' in the underlying FSTs.
template <class Accumulator,
          class T = DefaultReplaceStateTable<typename Accumulator::Arc>>
class ReplaceAccumulator {
 public:
  using Arc = typename Accumulator::Arc;
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using StateTable = T;
  using StateTuple = typename StateTable::StateTuple;
  using Weight = typename Arc::Weight;

  ReplaceAccumulator()
      : init_(false),
        data_(std::make_shared<
              ReplaceAccumulatorData<Accumulator, StateTable>>()),
        error_(false) {}

  explicit ReplaceAccumulator(const std::vector<Accumulator *> &accumulators)
      : init_(false),
        data_(std::make_shared<ReplaceAccumulatorData<Accumulator, StateTable>>(
            accumulators)),
        error_(false) {}

  ReplaceAccumulator(const ReplaceAccumulator<Accumulator, StateTable> &acc,
                     bool safe = false)
      : init_(acc.init_), data_(acc.data_), error_(acc.error_) {
    if (!init_) {
      FSTERROR() << "ReplaceAccumulator: Can't copy uninitialized accumulator";
    }
    if (safe) FSTERROR() << "ReplaceAccumulator: Safe copy not supported";
  }

  // Does not take ownership of the state table, the state table is owned by
  // the ReplaceFst.
  void Init(const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_tuples,
            const StateTable *state_table) {
    init_ = true;
    data_->Init(fst_tuples, state_table);
  }

  // Method required by LookAheadMatcher. However, ReplaceAccumulator needs to
  // be initialized by calling the Init method above before being passed to
  // LookAheadMatcher.
  //
  // TODO(allauzen): Revisit this. Consider creating a method
  // Init(const ReplaceFst<A, T, C>&, bool) and using friendship to get access
  // to the innards of ReplaceFst.
  void Init(const Fst<Arc> &fst, bool copy = false) {
    if (!init_) {
      FSTERROR() << "ReplaceAccumulator::Init: Accumulator needs to be"
                 << " initialized before being passed to LookAheadMatcher";
      error_ = true;
    }
  }

  void SetState(StateId s) {
    if (!init_) {
      FSTERROR() << "ReplaceAccumulator::SetState: Incorrectly initialized";
      error_ = true;
      return;
    }
    auto tuple = data_->GetTuple(s);
    fst_id_ = tuple.fst_id - 1;  // Replace FST ID is 1-based.
    data_->GetAccumulator(fst_id_)->SetState(tuple.fst_state);
    if ((tuple.prefix_id != 0) &&
        (data_->GetFst(fst_id_)->Final(tuple.fst_state) != Weight::Zero())) {
      offset_ = 1;
      offset_weight_ = data_->GetFst(fst_id_)->Final(tuple.fst_state);
    } else {
      offset_ = 0;
      offset_weight_ = Weight::Zero();
    }
    aiter_.reset(
        new ArcIterator<Fst<Arc>>(*data_->GetFst(fst_id_), tuple.fst_state));
  }

  Weight Sum(Weight w, Weight v) {
    if (error_) return Weight::NoWeight();
    return data_->GetAccumulator(fst_id_)->Sum(w, v);
  }

  template <class ArcIter>
  Weight Sum(Weight w, ArcIter *aiter, ssize_t begin, ssize_t end) {
    if (error_) return Weight::NoWeight();
    auto sum = begin == end
                   ? Weight::Zero()
                   : data_->GetAccumulator(fst_id_)->Sum(
                         w, aiter_.get(), begin ? begin - offset_ : 0,
                         end - offset_);
    if (begin == 0 && end != 0 && offset_ > 0) sum = Sum(offset_weight_, sum);
    return sum;
  }

  bool Error() const { return error_; }

 private:
  bool init_;
  std::shared_ptr<ReplaceAccumulatorData<Accumulator, StateTable>> data_;
  Label fst_id_;
  size_t offset_;
  Weight offset_weight_;
  std::unique_ptr<ArcIterator<Fst<Arc>>> aiter_;
  bool error_;
};

// SafeReplaceAccumulator accumulates weights in a ReplaceFst and copies of it
// are always thread-safe copies.
template <class Accumulator, class T>
class SafeReplaceAccumulator {
 public:
  using Arc = typename Accumulator::Arc;
  using StateId = typename Arc::StateId;
  using Label = typename Arc::Label;
  using Weight = typename Arc::Weight;
  using StateTable = T;
  using StateTuple = typename StateTable::StateTuple;

  SafeReplaceAccumulator() {}

  SafeReplaceAccumulator(const SafeReplaceAccumulator &copy, bool safe)
      : SafeReplaceAccumulator(copy) {}

  explicit SafeReplaceAccumulator(
      const std::vector<Accumulator> &accumulators) {
    for (const auto &accumulator : accumulators) {
      accumulators_.emplace_back(accumulator, true);
    }
  }

  void Init(const std::vector<std::pair<Label, const Fst<Arc> *>> &fst_tuples,
            const StateTable *state_table) {
    state_table_ = state_table;
    for (Label i = 0; i < fst_tuples.size(); ++i) {
      if (i == accumulators_.size()) {
        accumulators_.resize(accumulators_.size() + 1);
        accumulators_[i].Init(*(fst_tuples[i].second));
      }
      fst_array_.emplace_back(fst_tuples[i].second->Copy(true));
    }
    init_ = true;
  }

  void Init(const Fst<Arc> &fst, bool copy = false) {
    if (!init_) {
      FSTERROR() << "SafeReplaceAccumulator::Init: Accumulator needs to be"
                 << " initialized before being passed to LookAheadMatcher";
      error_ = true;
    }
  }

  void SetState(StateId s) {
    auto tuple = state_table_->Tuple(s);
    fst_id_ = tuple.fst_id - 1;  // Replace FST ID is 1-based
    GetAccumulator(fst_id_)->SetState(tuple.fst_state);
    offset_ = 0;
    offset_weight_ = Weight::Zero();
    const auto final_weight = GetFst(fst_id_)->Final(tuple.fst_state);
    if ((tuple.prefix_id != 0) && (final_weight != Weight::Zero())) {
      offset_ = 1;
      offset_weight_ = final_weight;
    }
    aiter_.Set(*GetFst(fst_id_), tuple.fst_state);
  }

  Weight Sum(Weight w, Weight v) {
    if (error_) return Weight::NoWeight();
    return GetAccumulator(fst_id_)->Sum(w, v);
  }

  template <class ArcIter>
  Weight Sum(Weight w, ArcIter *aiter, ssize_t begin, ssize_t end) {
    if (error_) return Weight::NoWeight();
    if (begin == end) return Weight::Zero();
    auto sum = GetAccumulator(fst_id_)->Sum(
        w, aiter_.get(), begin ? begin - offset_ : 0, end - offset_);
    if (begin == 0 && end != 0 && offset_ > 0) {
      sum = Sum(offset_weight_, sum);
    }
    return sum;
  }

  bool Error() const { return error_; }

 private:
  class ArcIteratorPtr {
   public:
    ArcIteratorPtr() {}

    ArcIteratorPtr(const ArcIteratorPtr &copy) {}

    void Set(const Fst<Arc> &fst, StateId state_id) {
      ptr_.reset(new ArcIterator<Fst<Arc>>(fst, state_id));
    }

    ArcIterator<Fst<Arc>> *get() { return ptr_.get(); }

   private:
    std::unique_ptr<ArcIterator<Fst<Arc>>> ptr_;
  };

  Accumulator *GetAccumulator(size_t i) { return &accumulators_[i]; }

  const Fst<Arc> *GetFst(size_t i) const { return fst_array_[i].get(); }

  const StateTable *state_table_;
  std::vector<Accumulator> accumulators_;
  std::vector<std::shared_ptr<Fst<Arc>>> fst_array_;
  ArcIteratorPtr aiter_;
  bool init_ = false;
  bool error_ = false;
  Label fst_id_;
  size_t offset_;
  Weight offset_weight_;
};

}  // namespace fst

#endif  // FST_ACCUMULATOR_H_

coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/extensions/compress/Makefile.am
AM_CPPFLAGS = -I$(srcdir)/../../include $(ICU_CPPFLAGS)

if HAVE_BIN
bin_PROGRAMS = fstcompress fstrandmod

LDADD = libfstcompressscript.la \
        ../../script/libfstscript.la \
        ../../lib/libfst.la \
        -lm $(DL_LIBS)

fstcompress_SOURCES = fstcompress.cc
fstrandmod_SOURCES = fstrandmod.cc
endif

if HAVE_SCRIPT
libfstcompressscript_la_SOURCES = compress-script.cc
libfstcompressscript_la_LDFLAGS = -version-info 13:0:0
libfstcompressscript_la_LIBADD = \
        ../../script/libfstscript.la \
        ../../lib/libfst.la -lz -lm $(DL_LIBS)
endif

if HAVE_SCRIPT
lib_LTLIBRARIES = libfstcompressscript.la
endif

0
coqui_public_repos/STT/native_client/java
coqui_public_repos/STT/native_client/java/.idea/gradle.xml
<?xml version="1.0" encoding="UTF-8"?> <project version="4"> <component name="GradleSettings"> <option name="linkedExternalProjectsSettings"> <GradleProjectSettings> <option name="distributionType" value="DEFAULT_WRAPPED" /> <option name="externalProjectPath" value="$PROJECT_DIR$" /> <option name="modules"> <set> <option value="$PROJECT_DIR$" /> <option value="$PROJECT_DIR$/app" /> <option value="$PROJECT_DIR$/libdeepspeech" /> </set> </option> <option name="resolveModulePerSourceSet" value="false" /> </GradleProjectSettings> </option> </component> </project>
0
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/vad_transcriber/audioTranscript_gui.py
import sys
import os
import time
import logging
import traceback
import numpy as np
import wavTranscriber
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import shlex
import subprocess

# Debug helpers
logging.basicConfig(stream=sys.stderr,
                    level=logging.DEBUG,
                    format='%(filename)s - %(funcName)s@%(lineno)d %(name)s:%(levelname)s %(message)s')


class WorkerSignals(QObject):
    '''
    Defines the signals available from a running worker thread.

    Supported signals are:

    finished: No data
    error: 'tuple' (exctype, value, traceback.format_exc())
    result: 'object' data returned from processing, anything
    progress: 'object' indicating the transcribed result
    '''
    finished = pyqtSignal()
    error = pyqtSignal(tuple)
    result = pyqtSignal(object)
    progress = pyqtSignal(object)


class Worker(QRunnable):
    '''
    Worker Thread

    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.

    @param callback: The function callback to run on this worker thread.
                     Supplied args and kwargs will be passed through to the runner.
    @type callback: function
    @param args: Arguments to pass to the callback function
    @param kwargs: Keywords to pass to the callback function
    '''

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()

        # Store the constructor arguments (re-used for processing)
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()

        # Add the callback to our kwargs
        self.kwargs['progress_callback'] = self.signals.progress

    @pyqtSlot()
    def run(self):
        '''
        Initialise the runner function with the passed args, kwargs.
        '''
        # Retrieve args/kwargs here; and fire up the processing using them
        try:
            transcript = self.fn(*self.args, **self.kwargs)
        except:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            # Return the result of the processing
            self.signals.result.emit(transcript)
        finally:
            # Done
            self.signals.finished.emit()


class App(QMainWindow):
    dirName = ""

    def __init__(self):
        super().__init__()
        self.title = 'Deepspeech Transcriber'
        self.left = 10
        self.top = 10
        self.width = 480
        self.height = 400
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        layout = QGridLayout()
        layout.setSpacing(10)

        self.microphone = QRadioButton("Microphone")
        self.fileUpload = QRadioButton("File Upload")
        self.browseBox = QLineEdit(self, placeholderText="Wave File, Mono @ 16 kHz, 16bit Little-Endian")
        self.modelsBox = QLineEdit(self, placeholderText="Directory path for output_graph and scorer")
        self.textboxTranscript = QPlainTextEdit(self, placeholderText="Transcription")
        self.browseButton = QPushButton('Browse', self)
        self.browseButton.setToolTip('Select a wav file')
        self.modelsButton = QPushButton('Browse', self)
        self.modelsButton.setToolTip('Select stt models folder')
        self.transcribeWav = QPushButton('Transcribe Wav', self)
        self.transcribeWav.setToolTip('Start Wav Transcription')
        self.openMicrophone = QPushButton('Start Speaking', self)
        self.openMicrophone.setToolTip('Open Microphone')

        layout.addWidget(self.microphone, 0, 1, 1, 2)
        layout.addWidget(self.fileUpload, 0, 3, 1, 2)
        layout.addWidget(self.browseBox, 1, 0, 1, 4)
        layout.addWidget(self.browseButton, 1, 4)
        layout.addWidget(self.modelsBox, 2, 0, 1, 4)
        layout.addWidget(self.modelsButton, 2, 4)
        layout.addWidget(self.transcribeWav, 3, 1, 1, 1)
        layout.addWidget(self.openMicrophone, 3, 3, 1, 1)
        layout.addWidget(self.textboxTranscript, 5, 0, -1, 0)

        w = QWidget()
        w.setLayout(layout)
        self.setCentralWidget(w)

        # Microphone
        self.microphone.clicked.connect(self.mic_activate)

        # File Upload
        self.fileUpload.clicked.connect(self.wav_activate)

        # Connect Browse Button to Function on_click
        self.browseButton.clicked.connect(self.browse_on_click)

        # Connect the Models Button
        self.modelsButton.clicked.connect(self.models_on_click)

        # Connect Transcription button to threadpool
        self.transcribeWav.clicked.connect(self.transcriptionStart_on_click)

        # Connect Microphone button to threadpool
        self.openMicrophone.clicked.connect(self.openMicrophone_on_click)
        self.openMicrophone.setCheckable(True)
        self.openMicrophone.toggle()

        self.browseButton.setEnabled(False)
        self.browseBox.setEnabled(False)
        self.modelsBox.setEnabled(False)
        self.modelsButton.setEnabled(False)
        self.transcribeWav.setEnabled(False)
        self.openMicrophone.setEnabled(False)

        self.show()

        # Setup Threadpool
        self.threadpool = QThreadPool()
        logging.debug("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())

    @pyqtSlot()
    def mic_activate(self):
        logging.debug("Enable streaming widgets")
        self.en_mic = True
        self.browseButton.setEnabled(False)
        self.browseBox.setEnabled(False)
        self.modelsBox.setEnabled(True)
        self.modelsButton.setEnabled(True)
        self.transcribeWav.setEnabled(False)
        self.openMicrophone.setStyleSheet('QPushButton {background-color: #70cc7c; color: black;}')
        self.openMicrophone.setEnabled(True)

    @pyqtSlot()
    def wav_activate(self):
        logging.debug("Enable wav transcription widgets")
        self.en_mic = False
        self.openMicrophone.setStyleSheet('QPushButton {background-color: #f7f7f7; color: black;}')
        self.openMicrophone.setEnabled(False)
        self.browseButton.setEnabled(True)
        self.browseBox.setEnabled(True)
        self.modelsBox.setEnabled(True)
        self.modelsButton.setEnabled(True)

    @pyqtSlot()
    def browse_on_click(self):
        logging.debug('Browse button clicked')
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        self.fileName, _ = QFileDialog.getOpenFileName(self, "Select wav file to be Transcribed", "", "All Files (*.wav)")
        if self.fileName:
            self.browseBox.setText(self.fileName)
            self.transcribeWav.setEnabled(True)
            logging.debug(self.fileName)

    @pyqtSlot()
    def models_on_click(self):
        logging.debug('Models Browse Button clicked')
        self.dirName = QFileDialog.getExistingDirectory(self, "Select stt models directory")
        if self.dirName:
            self.modelsBox.setText(self.dirName)
            logging.debug(self.dirName)

            # Threaded signal passing worker functions
            worker = Worker(self.modelWorker, self.dirName)
            worker.signals.result.connect(self.modelResult)
            worker.signals.finished.connect(self.modelFinish)
            worker.signals.progress.connect(self.modelProgress)

            # Execute
            self.threadpool.start(worker)
        else:
            logging.critical("*****************************************************")
            logging.critical("Model path not specified..")
            logging.critical("*****************************************************")
            return "Transcription Failed, models path not specified"

    def modelWorker(self, dirName, progress_callback):
        self.textboxTranscript.setPlainText("Loading Models...")
        self.openMicrophone.setStyleSheet('QPushButton {background-color: #f7f7f7; color: black;}')
        self.openMicrophone.setEnabled(False)
        self.show()
        time.sleep(1)
        return dirName

    def modelProgress(self, s):
        # FixMe: Write code to show progress here
        pass

    def modelResult(self, dirName):
        # Fetch and Resolve all the paths of model files
        output_graph, scorer = wavTranscriber.resolve_models(dirName)

        # Load output_graph, alphabet and scorer
        self.model = wavTranscriber.load_model(output_graph, scorer)

    def modelFinish(self):
        # self.timer.stop()
        self.textboxTranscript.setPlainText("Loaded Models, start transcribing")
        if self.en_mic is True:
            self.openMicrophone.setStyleSheet('QPushButton {background-color: #70cc7c; color: black;}')
            self.openMicrophone.setEnabled(True)
        self.show()

    @pyqtSlot()
    def transcriptionStart_on_click(self):
        logging.debug('Transcription Start button clicked')

        # Clear out older data
        self.textboxTranscript.setPlainText("")
        self.show()

        # Threaded signal passing worker functions
        worker = Worker(self.wavWorker, self.fileName)
        worker.signals.progress.connect(self.progress)
        worker.signals.result.connect(self.transcription)
        worker.signals.finished.connect(self.wavFinish)

        # Execute
        self.threadpool.start(worker)

    @pyqtSlot()
    def openMicrophone_on_click(self):
        logging.debug('Preparing to open microphone...')

        # Clear out older data
        self.textboxTranscript.setPlainText("")
        self.show()

        # Threaded signal passing worker functions
        # Prepare env for capturing from microphone and offload work to micWorker worker thread
        if (not self.openMicrophone.isChecked()):
            self.openMicrophone.setStyleSheet('QPushButton {background-color: #C60000; color: black;}')
            self.openMicrophone.setText("Stop")
            logging.debug("Start Recording pressed")
            logging.debug("Preparing for transcription...")

            sctx = self.model[0].createStream()
            subproc = subprocess.Popen(shlex.split('rec -q -V0 -e signed -L -c 1 -b 16 -r 16k -t raw - gain -2'),
                                       stdout=subprocess.PIPE,
                                       bufsize=0)
            self.textboxTranscript.insertPlainText('You can start speaking now\n\n')
            self.show()
            logging.debug('You can start speaking now')
            context = (sctx, subproc, self.model[0])

            # Pass the state to streaming worker
            worker = Worker(self.micWorker, context)
            worker.signals.progress.connect(self.progress)
            worker.signals.result.connect(self.transcription)
            worker.signals.finished.connect(self.micFinish)

            # Execute
            self.threadpool.start(worker)
        else:
            logging.debug("Stop Recording")

    '''
    Capture the audio stream from the microphone.
    The context is prepared by openMicrophone_on_click().
    @param context: a tuple containing three objects:
        1. Speech samples, sctx
        2. subprocess handle
        3. Deepspeech model object
    '''
    def micWorker(self, context, progress_callback):
        # Deepspeech Streaming will be run from this method
        logging.debug("Recording from your microphone")
        while (not self.openMicrophone.isChecked()):
            data = context[1].stdout.read(512)
            context[0].feedAudioContent(np.frombuffer(data, np.int16))
        else:
            transcript = context[0].finishStream()
            context[1].terminate()
            context[1].wait()
            self.show()
            progress_callback.emit(transcript)
            return "\n*********************\nTranscription Done..."
    def micFinish(self):
        self.openMicrophone.setText("Start Speaking")
        self.openMicrophone.setStyleSheet('QPushButton {background-color: #70cc7c; color: black;}')

    def transcription(self, out):
        logging.debug("%s" % out)
        self.textboxTranscript.insertPlainText(out)
        self.show()

    def wavFinish(self):
        logging.debug("File processed")

    def progress(self, chunk):
        logging.debug("Progress: %s" % chunk)
        self.textboxTranscript.insertPlainText(chunk)
        self.show()

    def wavWorker(self, waveFile, progress_callback):
        # Deepspeech will be run from this method
        logging.debug("Preparing for transcription...")
        inference_time = 0.0

        # Run VAD on the input file
        segments, sample_rate, audio_length = wavTranscriber.vad_segment_generator(waveFile, 1)
        # os.path.splitext (rather than str.rstrip) safely drops the ".wav" suffix
        transcript_file = os.path.splitext(waveFile)[0] + ".txt"
        f = open(transcript_file, 'w')
        logging.debug("Saving Transcript @: %s" % transcript_file)

        for i, segment in enumerate(segments):
            # Run stt on the chunk that just completed VAD
            logging.debug("Processing chunk %002d" % (i,))
            audio = np.frombuffer(segment, dtype=np.int16)
            output = wavTranscriber.stt(self.model[0], audio, sample_rate)
            inference_time += output[1]

            f.write(output[0] + " ")
            progress_callback.emit(output[0] + " ")

        # Summary of the files processed
        f.close()

        # Format pretty, extract filename from the full file path
        filename, ext = os.path.splitext(os.path.basename(waveFile))
        title_names = ['Filename', 'Duration(s)', 'Inference Time(s)', 'Model Load Time(s)', 'Scorer Load Time(s)']
        logging.debug("************************************************************************************************************")
        logging.debug("%-30s %-20s %-20s %-20s %s" % (title_names[0], title_names[1], title_names[2], title_names[3], title_names[4]))
        logging.debug("%-30s %-20.3f %-20.3f %-20.3f %-0.3f" % (filename + ext, audio_length, inference_time, self.model[1], self.model[2]))
        logging.debug("************************************************************************************************************")
        print("\n%-30s %-20s %-20s %-20s %s" % (title_names[0], title_names[1], title_names[2], title_names[3], title_names[4]))
        print("%-30s %-20.3f %-20.3f %-20.3f %-0.3f" % (filename + ext, audio_length, inference_time, self.model[1], self.model[2]))

        return "\n*********************\nTranscription Done..."


def main(args):
    app = QApplication(sys.argv)
    w = App()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main(sys.argv[1:])
0
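The Worker/WorkerSignals pair in audioTranscript_gui.py is a reusable PyQt5 pattern: wrap any function in a QRunnable, inject a progress signal through kwargs, and let the thread pool run it. A stripped-down sketch follows (names are illustrative, and delivering cross-thread signals still requires a running Qt event loop):

import traceback
from PyQt5.QtCore import QObject, QRunnable, QThreadPool, pyqtSignal, pyqtSlot

class MiniSignals(QObject):
    result = pyqtSignal(object)
    progress = pyqtSignal(object)

class MiniWorker(QRunnable):
    def __init__(self, fn, *args, **kwargs):
        super().__init__()
        self.fn, self.args, self.kwargs = fn, args, kwargs
        self.signals = MiniSignals()
        # The wrapped function receives the progress signal as a kwarg
        self.kwargs['progress_callback'] = self.signals.progress

    @pyqtSlot()
    def run(self):
        try:
            self.signals.result.emit(self.fn(*self.args, **self.kwargs))
        except Exception:
            traceback.print_exc()

def transcribe_stub(path, progress_callback):
    progress_callback.emit("chunk done")  # emitted from the worker thread
    return "transcript for " + path

# QThreadPool.globalInstance().start(MiniWorker(transcribe_stub, "audio.wav"))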
coqui_public_repos/STT-examples/django_api_streaming
coqui_public_repos/STT-examples/django_api_streaming/example/settings.py
""" Django settings for this example project. Generated by 'django-admin startproject' using Django 2.0. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) DATA_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '#-kfa^tjc6@bdpc5)d^yveabzd7_j!$ii5_ish66=cc!o3!bih' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ["*"] # ['127.0.0.1', '10.210.22.166'] # Application definition INSTALLED_APPS = [ 'corsheaders', 'sslserver', 'bootstrap3', 'stt_app.apps.STT', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'channels', ] MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] CORS_ORIGIN_WHITELIST = ( 'localhost:8443', '127.0.0.1:8443', '10.210.22.166:8443' ) CORS_ALLOW_HEADERS = ( 'accept', 'accept-encoding', 'authorization', 'content-type', 'dnt', 'origin', 'user-agent', 'x-csrftoken', 'x-requested-with', 'x-csrf-token', 'cip', 'isajaxrequest', ) # CORS_URLS_REGEX = r'^/handleaudio/.*$' #settings for channels - Begin CHANNEL_LAYERS = { "default": { "BACKEND": "asgiref.inmemory.ChannelLayer", "ROUTING": "stt_app.routing.channel_routing", }, } #settings for channels - Begin ROOT_URLCONF = 'example.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'example.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(DATA_DIR, 'static')
0
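CHANNEL_LAYERS above routes through stt_app.routing.channel_routing, which is not shown here. With the channels 1.x API implied by the asgiref.inmemory backend, that module would look roughly like the hypothetical sketch below (the consumer body is invented for illustration):

from channels.routing import route

def ws_receive(message):
    # A real consumer would feed these audio bytes into an STT stream;
    # here we just echo them back to the client.
    message.reply_channel.send({'bytes': message.content.get('bytes', b'')})

channel_routing = [
    route('websocket.receive', ws_receive),
]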
coqui_public_repos/STT-models/indonesian/itml
coqui_public_repos/STT-models/indonesian/itml/v0.1.0/MODEL_CARD.md
# Model card for Indonesian STT

Jump to section:

- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)

## Model details

- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Indonesian / Bahasa Indonesia / `id`
- Model date: April 9, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.0`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{indonesian-stt, author = {Tyers, Francis}, title = {Indonesian STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-ID-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).

## Intended use

Speech-to-Text for the [Indonesian Language](https://en.wikipedia.org/wiki/Indonesian_language) on 16kHz, mono-channel audio.

## Performance Factors

Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

## Metrics

STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.

#### Transcription Accuracy

The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/id/).

|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|89.7\%|30.3\%|

#### Real-Time Factor

Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF.

Recorded average RTF on laptop CPU: ``

#### Model Size

`model.pbmm`: 181M
`model.tflite`: 46M

### Approaches to uncertainty and variability

Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.

## Training data

This model was trained on Common Voice 6.1 train.

## Evaluation data

The model was evaluated on Common Voice 6.1 test.

## Ethical considerations

Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.

### Demographic Bias

You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.

### Surveillance

Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries.
You should not assume consent to record and analyze private speech. ## Caveats and recommendations Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
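The model card defines Real-Time Factor as processing-time / length-of-audio; a quick worked example (the numbers below are invented):

def real_time_factor(processing_seconds, audio_seconds):
    return processing_seconds / audio_seconds

# Transcribing 10 s of audio in 4.2 s gives RTF = 0.42; values below 1.0
# mean the model keeps up with real-time input.
print(real_time_factor(4.2, 10.0))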
coqui_public_repos/STT
coqui_public_repos/STT/bin/ops_in_graph.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import tensorflow.compat.v1 as tfv1 def main(): with tfv1.gfile.FastGFile(sys.argv[1], "rb") as fin: graph_def = tfv1.GraphDef() graph_def.ParseFromString(fin.read()) print("\n".join(sorted(set(n.op for n in graph_def.node)))) if __name__ == "__main__": main()
0
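A small variation on ops_in_graph.py that counts how often each op appears, instead of only listing unique names, using the same TF1 GraphDef APIs (the path in the trailing comment is illustrative):

from collections import Counter

import tensorflow.compat.v1 as tfv1

def count_ops(path):
    with tfv1.gfile.FastGFile(path, "rb") as fin:
        graph_def = tfv1.GraphDef()
        graph_def.ParseFromString(fin.read())
    # Tally each node's op type and print the most frequent first
    for op, count in Counter(node.op for node in graph_def.node).most_common():
        print("%6d  %s" % (count, op))

# count_ops("output_graph.pb")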
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/union.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes to compute the union of two FSTs.

#ifndef FST_UNION_H_
#define FST_UNION_H_

#include <algorithm>
#include <vector>

#include <fst/mutable-fst.h>
#include <fst/rational.h>

namespace fst {

// Computes the union (sum) of two FSTs. This version writes the union to an
// output MutableFst. If A transduces string x to y with weight a and B
// transduces string w to v with weight b, then their union transduces x to y
// with weight a and w to v with weight b.
//
// Complexity:
//
//   Time: O(V_2 + E_2)
//   Space: O(V_2 + E_2)
//
// where V_i is the number of states, and E_i is the number of arcs, in the
// ith FST.
template <class Arc>
void Union(MutableFst<Arc> *fst1, const Fst<Arc> &fst2) {
  using Label = typename Arc::Label;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;
  // Checks for symbol table compatibility.
  if (!CompatSymbols(fst1->InputSymbols(), fst2.InputSymbols()) ||
      !CompatSymbols(fst1->OutputSymbols(), fst2.OutputSymbols())) {
    FSTERROR() << "Union: Input/output symbol tables of 1st argument "
               << "do not match input/output symbol tables of 2nd argument";
    fst1->SetProperties(kError, kError);
    return;
  }
  const auto numstates1 = fst1->NumStates();
  const bool initial_acyclic1 = fst1->Properties(kInitialAcyclic, true);
  const auto props1 = fst1->Properties(kFstProperties, false);
  const auto props2 = fst2.Properties(kFstProperties, false);
  const auto start2 = fst2.Start();
  if (start2 == kNoStateId) {
    if (props2 & kError) fst1->SetProperties(kError, kError);
    return;
  }
  if (fst2.Properties(kExpanded, false)) {
    fst1->ReserveStates(numstates1 + CountStates(fst2) +
                        (initial_acyclic1 ? 0 : 1));
  }
  for (StateIterator<Fst<Arc>> siter(fst2); !siter.Done(); siter.Next()) {
    const auto s1 = fst1->AddState();
    const auto s2 = siter.Value();
    fst1->SetFinal(s1, fst2.Final(s2));
    fst1->ReserveArcs(s1, fst2.NumArcs(s2));
    for (ArcIterator<Fst<Arc>> aiter(fst2, s2); !aiter.Done(); aiter.Next()) {
      auto arc = aiter.Value();  // Copy intended.
      arc.nextstate += numstates1;
      fst1->AddArc(s1, arc);
    }
  }
  const auto start1 = fst1->Start();
  if (start1 == kNoStateId) {
    fst1->SetStart(start2);
    fst1->SetProperties(props2, kCopyProperties);
    return;
  }
  if (initial_acyclic1) {
    fst1->AddArc(start1, Arc(0, 0, Weight::One(), start2 + numstates1));
  } else {
    const auto nstart1 = fst1->AddState();
    fst1->SetStart(nstart1);
    fst1->AddArc(nstart1, Arc(0, 0, Weight::One(), start1));
    fst1->AddArc(nstart1, Arc(0, 0, Weight::One(), start2 + numstates1));
  }
  fst1->SetProperties(UnionProperties(props1, props2), kFstProperties);
}

// Computes the union of two FSTs, modifying the RationalFst argument.
template <class Arc>
void Union(RationalFst<Arc> *fst1, const Fst<Arc> &fst2) {
  fst1->GetMutableImpl()->AddUnion(fst2);
}

using UnionFstOptions = RationalFstOptions;

// Computes the union (sum) of two FSTs. This version is a delayed FST. If A
// transduces string x to y with weight a and B transduces string w to v with
// weight b, then their union transduces x to y with weight a and w to v with
// weight b.
//
// Complexity:
//
//   Time: O(v_1 + e_1 + v_2 + e_2)
//   Space: O(v_1 + v_2)
//
// where v_i is the number of states visited, and e_i is the number of arcs
// visited, in the ith FST. Constant time and space to visit an input state or
// arc is assumed and exclusive of caching.
template <class A> class UnionFst : public RationalFst<A> { public: using Arc = A; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; UnionFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2) { GetMutableImpl()->InitUnion(fst1, fst2); } UnionFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2, const UnionFstOptions &opts) : RationalFst<Arc>(opts) { GetMutableImpl()->InitUnion(fst1, fst2); } // See Fst<>::Copy() for doc. UnionFst(const UnionFst<Arc> &fst, bool safe = false) : RationalFst<Arc>(fst, safe) {} // Gets a copy of this UnionFst. See Fst<>::Copy() for further doc. UnionFst<Arc> *Copy(bool safe = false) const override { return new UnionFst<Arc>(*this, safe); } private: using ImplToFst<internal::RationalFstImpl<Arc>>::GetImpl; using ImplToFst<internal::RationalFstImpl<Arc>>::GetMutableImpl; }; // Specialization for UnionFst. template <class Arc> class StateIterator<UnionFst<Arc>> : public StateIterator<RationalFst<Arc>> { public: explicit StateIterator(const UnionFst<Arc> &fst) : StateIterator<RationalFst<Arc>>(fst) {} }; // Specialization for UnionFst. template <class Arc> class ArcIterator<UnionFst<Arc>> : public ArcIterator<RationalFst<Arc>> { public: using StateId = typename Arc::StateId; ArcIterator(const UnionFst<Arc> &fst, StateId s) : ArcIterator<RationalFst<Arc>>(fst, s) {} }; using StdUnionFst = UnionFst<StdArc>; } // namespace fst #endif // FST_UNION_H_
0
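For intuition, the destructive Union() above can be modeled in a few lines of Python over a toy FST representation (plain dicts and tuples invented for this sketch; weights are tropical, so Weight::One() is 0.0). This follows the non-initial-acyclic branch, which always adds a fresh super-initial state:

EPS = 0  # epsilon label, as in Arc(0, 0, Weight::One(), ...)

def union(fst1, fst2):
    offset = fst1['num_states']
    # Copy fst2's arcs, renumbering every state by the size of fst1.
    for (src, ilabel, olabel, weight, dst) in fst2['arcs']:
        fst1['arcs'].append((src + offset, ilabel, olabel, weight, dst + offset))
    fst1['finals'].update({s + offset: w for s, w in fst2['finals'].items()})
    fst1['num_states'] += fst2['num_states']
    # Fresh start state with free (weight-one) epsilon arcs into both old starts.
    start = fst1['num_states']
    fst1['num_states'] += 1
    fst1['arcs'].append((start, EPS, EPS, 0.0, fst1['start']))
    fst1['arcs'].append((start, EPS, EPS, 0.0, fst2['start'] + offset))
    fst1['start'] = start
    return fst1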
coqui_public_repos/STT-examples
coqui_public_repos/STT-examples/mic_vad_streaming/test.sh
#!/bin/bash set -xe THIS=$(dirname "$0") pushd ${THIS} source ../tests.sh pip install --user $(get_python_wheel_url "$1") pip install --user -r <(grep -v stt requirements.txt) pulseaudio & python mic_vad_streaming.py \ --model $HOME/STT/models/model.tflite \ --scorer $HOME/STT/models/huge-vocab.scorer \ --file $HOME/STT/audio/2830-3980-0043.wav popd
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/tc-node-tests.sh
#!/bin/bash set -xe source $(dirname "$0")/tc-tests-utils.sh nodever=$1 if [ -z "${nodever}" ]; then echo "No node version given, aborting." exit 1 fi; bitrate=$2 set_ldc_sample_filename "${bitrate}" download_data node --version npm --version NODE_ROOT="${DS_ROOT_TASK}/ds-test/" NODE_CACHE="${DS_ROOT_TASK}/ds-test.cache/" export NODE_PATH="${NODE_ROOT}/node_modules/" export PATH="${NODE_ROOT}:${NODE_PATH}/.bin/:$PATH" # make sure that NODE_ROOT really exists mkdir -p ${NODE_ROOT} deepspeech_npm_url=$(get_dep_npm_pkg_url) npm install --prefix ${NODE_ROOT} --cache ${NODE_CACHE} ${deepspeech_npm_url} check_runtime_nodejs ensure_cuda_usage "$3" run_all_inference_tests run_js_streaming_inference_tests run_hotword_tests
0
coqui_public_repos/STT/native_client/java/libstt/src/main/java/ai/coqui
coqui_public_repos/STT/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/TokenMetadata.java
/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.2 * * Do not make changes to this file unless you know what you are doing--modify * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ package ai.coqui.libstt; /** * Stores text of an individual token, along with its timing information */ public class TokenMetadata { private transient long swigCPtr; protected transient boolean swigCMemOwn; protected TokenMetadata(long cPtr, boolean cMemoryOwn) { swigCMemOwn = cMemoryOwn; swigCPtr = cPtr; } protected static long getCPtr(TokenMetadata obj) { return (obj == null) ? 0 : obj.swigCPtr; } public synchronized void delete() { if (swigCPtr != 0) { if (swigCMemOwn) { swigCMemOwn = false; throw new UnsupportedOperationException("C++ destructor does not have public access"); } swigCPtr = 0; } } /** * The text corresponding to this token */ public String getText() { return implJNI.TokenMetadata_Text_get(swigCPtr, this); } /** * Position of the token in units of 20ms */ public long getTimestep() { return implJNI.TokenMetadata_Timestep_get(swigCPtr, this); } /** * Position of the token in seconds */ public float getStartTime() { return implJNI.TokenMetadata_StartTime_get(swigCPtr, this); } }
0
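A rough Python counterpart to the Java getters above, via the stt package; the method and field names mirror the DeepSpeech-era metadata API, so treat the exact signatures as an assumption and check your installed bindings:

import wave

import numpy as np
from stt import Model

model = Model("model.tflite")  # illustrative path
with wave.open("audio.wav") as w:
    audio = np.frombuffer(w.readframes(w.getnframes()), np.int16)

metadata = model.sttWithMetadata(audio, 1)
for token in metadata.transcripts[0].tokens:
    # text / timestep (20 ms units) / start_time (seconds), as documented above
    print(token.text, token.timestep, token.start_time)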
coqui_public_repos/STT-examples/electron
coqui_public_repos/STT-examples/electron/public/download.js
const request = require('request');
const fs = require('fs');

// generic http download
function download(url, dest, callback) {
    var file = fs.createWriteStream(dest);
    console.log('Downloading:', url);
    const sendReq = request.get(url);

    sendReq.on('response', (response) => {
        if (response.statusCode === 200) {
            console.log('PLEASE WAIT...');
            sendReq.pipe(file);
        } else {
            // Surface HTTP failures instead of leaving the callback hanging
            file.destroy();
            fs.unlink(dest, () => callback(new Error('Download failed: HTTP ' + response.statusCode)));
        }
    });

    // Network-level errors (DNS failure, connection reset, ...)
    sendReq.on('error', (err) => {
        file.destroy();
        fs.unlink(dest, () => callback(err));
    });

    file.on('finish', () => {
        file.close();
        console.log('Saved:', dest);
        callback();
    });
}

module.exports = download;
0
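A Python counterpart to download.js, sketched with the widely used requests library; streaming to disk keeps large model files out of memory:

import requests

def download(url, dest):
    print('Downloading:', url)
    with requests.get(url, stream=True) as r:
        r.raise_for_status()  # fail loudly on non-200 responses
        with open(dest, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1 << 16):
                f.write(chunk)
    print('Saved:', dest)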
coqui_public_repos/STT-models/dhivehi/itml
coqui_public_repos/STT-models/dhivehi/itml/v0.1.0/LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. 
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/include/fst/script/synchronize.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_SCRIPT_SYNCHRONIZE_H_
#define FST_SCRIPT_SYNCHRONIZE_H_

#include <utility>

#include <fst/synchronize.h>
#include <fst/script/fst-class.h>

namespace fst {
namespace script {

using SynchronizeArgs = std::pair<const FstClass &, MutableFstClass *>;

template <class Arc>
void Synchronize(SynchronizeArgs *args) {
  const Fst<Arc> &ifst = *(std::get<0>(*args).GetFst<Arc>());
  MutableFst<Arc> *ofst = std::get<1>(*args)->GetMutableFst<Arc>();
  Synchronize(ifst, ofst);
}

void Synchronize(const FstClass &ifst, MutableFstClass *ofst);

}  // namespace script
}  // namespace fst

#endif  // FST_SCRIPT_SYNCHRONIZE_H_
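For orientation, here is a minimal sketch (not part of the library) of how this scripting-layer Synchronize might be driven when the arc type is only known at runtime. The file names are hypothetical placeholders; only the FstClass/VectorFstClass API from fst/script/fst-class.h and the Synchronize declaration above are assumed.

#include <memory>

#include <fst/script/fst-class.h>
#include <fst/script/synchronize.h>

int main() {
  // Reads an FST without committing to an arc type at compile time.
  std::unique_ptr<fst::script::FstClass> ifst(
      fst::script::FstClass::Read("input.fst"));  // hypothetical path
  if (!ifst) return 1;
  // The output container must be created with a matching arc type.
  fst::script::VectorFstClass ofst(ifst->ArcType());
  // Dispatches to the templated Synchronize<Arc> defined above.
  fst::script::Synchronize(*ifst, &ofst);
  ofst.Write("synchronized.fst");  // hypothetical path
  return 0;
}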
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/external/rapidjson/stringbuffer.h
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef CEREAL_RAPIDJSON_STRINGBUFFER_H_
#define CEREAL_RAPIDJSON_STRINGBUFFER_H_

#include "stream.h"
#include "internal/stack.h"

#if CEREAL_RAPIDJSON_HAS_CXX11_RVALUE_REFS
#include <utility> // std::move
#endif

#if defined(__clang__)
CEREAL_RAPIDJSON_DIAG_PUSH
CEREAL_RAPIDJSON_DIAG_OFF(c++98-compat)
#endif

CEREAL_RAPIDJSON_NAMESPACE_BEGIN

//! Represents an in-memory output stream.
/*!
    \tparam Encoding Encoding of the stream.
    \tparam Allocator type for allocating memory buffer.
    \note implements Stream concept
*/
template <typename Encoding, typename Allocator = CrtAllocator>
class GenericStringBuffer {
public:
    typedef typename Encoding::Ch Ch;

    GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}

#if CEREAL_RAPIDJSON_HAS_CXX11_RVALUE_REFS
    GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {}
    GenericStringBuffer& operator=(GenericStringBuffer&& rhs) {
        if (&rhs != this)
            stack_ = std::move(rhs.stack_);
        return *this;
    }
#endif

    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
    void PutUnsafe(Ch c) { *stack_.template PushUnsafe<Ch>() = c; }
    void Flush() {}

    void Clear() { stack_.Clear(); }
    void ShrinkToFit() {
        // Push and pop a null terminator. This is safe.
        *stack_.template Push<Ch>() = '\0';
        stack_.ShrinkToFit();
        stack_.template Pop<Ch>(1);
    }

    void Reserve(size_t count) { stack_.template Reserve<Ch>(count); }
    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
    Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe<Ch>(count); }
    void Pop(size_t count) { stack_.template Pop<Ch>(count); }

    const Ch* GetString() const {
        // Push and pop a null terminator. This is safe.
        *stack_.template Push<Ch>() = '\0';
        stack_.template Pop<Ch>(1);
        return stack_.template Bottom<Ch>();
    }

    //! Get the size of string in bytes in the string buffer.
    size_t GetSize() const { return stack_.GetSize(); }

    //! Get the length of string in Ch in the string buffer.
    size_t GetLength() const { return stack_.GetSize() / sizeof(Ch); }

    static const size_t kDefaultCapacity = 256;
    mutable internal::Stack<Allocator> stack_;

private:
    // Prohibit copy constructor & assignment operator.
    GenericStringBuffer(const GenericStringBuffer&);
    GenericStringBuffer& operator=(const GenericStringBuffer&);
};

//! String buffer with UTF8 encoding
typedef GenericStringBuffer<UTF8<> > StringBuffer;

template<typename Encoding, typename Allocator>
inline void PutReserve(GenericStringBuffer<Encoding, Allocator>& stream, size_t count) {
    stream.Reserve(count);
}

template<typename Encoding, typename Allocator>
inline void PutUnsafe(GenericStringBuffer<Encoding, Allocator>& stream, typename Encoding::Ch c) {
    stream.PutUnsafe(c);
}

//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(GenericStringBuffer<UTF8<> >& stream, char c, size_t n) {
    std::memset(stream.stack_.Push<char>(n), c, n * sizeof(c));
}

CEREAL_RAPIDJSON_NAMESPACE_END

#if defined(__clang__)
CEREAL_RAPIDJSON_DIAG_POP
#endif

#endif // CEREAL_RAPIDJSON_STRINGBUFFER_H_
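As a usage sketch, this buffer is most commonly paired with a Writer. The snippet below uses upstream RapidJSON include paths and namespace (the vendored copy above renames the guard macros with a CEREAL_ prefix, but the GenericStringBuffer interface is the same); it is an illustration, not part of this header.

#include <cstdio>

#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"

int main() {
    rapidjson::StringBuffer buffer;  // in-memory output stream (Stream concept)
    rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
    writer.StartObject();
    writer.Key("project");
    writer.String("cereal");
    writer.Key("version");
    writer.Int(1);
    writer.EndObject();
    // GetString() pushes and pops a '\0' internally, so the result is
    // null-terminated while GetSize() still excludes the terminator.
    std::printf("%s (%zu bytes)\n", buffer.GetString(), buffer.GetSize());
    return 0;
}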
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/lib/symbol-table-ops.cc
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//

#include <fst/symbol-table-ops.h>

#include <string>

namespace fst {

SymbolTable *MergeSymbolTable(const SymbolTable &left, const SymbolTable &right,
                              bool *right_relabel_output) {
  // MergeSymbolTable detects several special cases. It will return a
  // reference-copied version of left or right if either symbol table is a
  // superset of the other.
  std::unique_ptr<SymbolTable> merged(
      new SymbolTable("merge_" + left.Name() + "_" + right.Name()));
  // Copies everything from the left symbol table.
  bool left_has_all = true;
  bool right_has_all = true;
  bool relabel = false;
  for (SymbolTableIterator liter(left); !liter.Done(); liter.Next()) {
    merged->AddSymbol(liter.Symbol(), liter.Value());
    if (right_has_all) {
      int64_t key = right.Find(liter.Symbol());
      if (key == -1) {
        right_has_all = false;
      } else if (!relabel && key != liter.Value()) {
        relabel = true;
      }
    }
  }
  if (right_has_all) {
    if (right_relabel_output) *right_relabel_output = relabel;
    return right.Copy();
  }
  // Adds all the symbols we can from the right symbol table.
  std::vector<string> conflicts;
  for (SymbolTableIterator riter(right); !riter.Done(); riter.Next()) {
    int64_t key = merged->Find(riter.Symbol());
    if (key != -1) {
      // Symbol already exists, maybe with a different value.
      if (key != riter.Value()) relabel = true;
      continue;
    }
    // Symbol doesn't exist on the left.
    left_has_all = false;
    if (!merged->Find(riter.Value()).empty()) {
      // We can't add this symbol where we want to; add it later, in order.
      conflicts.push_back(riter.Symbol());
      continue;
    }
    // There is a hole, so we can add this symbol with its ID.
    merged->AddSymbol(riter.Symbol(), riter.Value());
  }
  if (right_relabel_output) *right_relabel_output = relabel;
  if (left_has_all) return left.Copy();
  // Adds all symbols that conflicted, in order.
  for (const auto &conflict : conflicts) merged->AddSymbol(conflict);
  return merged.release();
}

SymbolTable *CompactSymbolTable(const SymbolTable &syms) {
  std::map<int64_t, string> sorted;
  SymbolTableIterator stiter(syms);
  for (; !stiter.Done(); stiter.Next()) {
    sorted[stiter.Value()] = stiter.Symbol();
  }
  auto *compact = new SymbolTable(syms.Name() + "_compact");
  int64_t newkey = 0;
  for (const auto &kv : sorted) compact->AddSymbol(kv.second, newkey++);
  return compact;
}

SymbolTable *FstReadSymbols(const string &filename, bool input_symbols) {
  std::ifstream in(filename, std::ios_base::in | std::ios_base::binary);
  if (!in) {
    LOG(ERROR) << "FstReadSymbols: Can't open file " << filename;
    return nullptr;
  }
  FstHeader hdr;
  if (!hdr.Read(in, filename)) {
    LOG(ERROR) << "FstReadSymbols: Couldn't read header from " << filename;
    return nullptr;
  }
  if (hdr.GetFlags() & FstHeader::HAS_ISYMBOLS) {
    std::unique_ptr<SymbolTable> isymbols(SymbolTable::Read(in, filename));
    if (isymbols == nullptr) {
      LOG(ERROR) << "FstReadSymbols: Couldn't read input symbols from "
                 << filename;
      return nullptr;
    }
    if (input_symbols) return isymbols.release();
  }
  if (hdr.GetFlags() & FstHeader::HAS_OSYMBOLS) {
    std::unique_ptr<SymbolTable> osymbols(SymbolTable::Read(in, filename));
    if (osymbols == nullptr) {
      LOG(ERROR) << "FstReadSymbols: Couldn't read output symbols from "
                 << filename;
      return nullptr;
    }
    if (!input_symbols) return osymbols.release();
  }
  LOG(ERROR) << "FstReadSymbols: The file " << filename
             << " doesn't contain the requested symbols";
  return nullptr;
}

bool AddAuxiliarySymbols(const string &prefix, int64_t start_label,
                         int64_t nlabels, SymbolTable *syms) {
  for (int64_t i = 0; i < nlabels; ++i) {
    auto index = i + start_label;
    if (index != syms->AddSymbol(prefix + std::to_string(i), index)) {
      FSTERROR() << "AddAuxiliarySymbols: Symbol table clash";
      return false;
    }
  }
  return true;
}

}  // namespace fst
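A small sketch of the merge semantics above (the tables and symbols are made up for illustration; only the MergeSymbolTable contract from this file is assumed):

#include <iostream>
#include <memory>

#include <fst/symbol-table.h>
#include <fst/symbol-table-ops.h>

int main() {
  fst::SymbolTable left("left"), right("right");
  left.AddSymbol("<eps>", 0);
  left.AddSymbol("a", 1);
  left.AddSymbol("b", 2);
  right.AddSymbol("<eps>", 0);
  right.AddSymbol("b", 1);  // Same symbol as left's "b", different key.
  bool relabel = false;
  std::unique_ptr<fst::SymbolTable> merged(
      fst::MergeSymbolTable(left, right, &relabel));
  // Every right symbol already exists on the left, so the left_has_all fast
  // path returns a copy of left; relabel is true because right's "b" (key 1)
  // maps to key 2 in the merged table.
  std::cout << "b -> " << merged->Find("b") << ", relabel = " << relabel
            << std::endl;  // prints: b -> 2, relabel = 1
  return 0;
}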
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/kenlm/Doxyfile
# Doxyfile 1.6.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = KenLM # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = doc # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. 
Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = YES # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. 
INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. 
Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. 
EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = YES # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = YES # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. 
SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = YES # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = YES # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. 
SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. 
INPUT = lm lm/builder lm/filter lm/interpolate lm/wrappers util util/double-conversion util/stream # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. 
# If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # If the HTML_TIMESTAMP tag is set to YES then the generated HTML # documentation will contain the timesstamp. HTML_TIMESTAMP = NO # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. 
GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>. 
QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # When the SEARCHENGINE tag is enable doxygen will generate a search box for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) # there is already a search function so this one should typically # be disabled. SEARCHENGINE = YES #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. 
COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. 
RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. 
PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the include files # in the INCLUDE_PATH (see below) will be searched if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. 
You need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will show a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. 
DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lie further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES
0
coqui_public_repos/STT
coqui_public_repos/STT/ci_scripts/electronjs_tflite-tests-prod.sh
#!/bin/bash set -xe source $(dirname "$0")/all-vars.sh source $(dirname "$0")/all-utils.sh source $(dirname "$0")/asserts.sh samplerate=$1 ldc93s1_sample_filename="LDC93S1_pcms16le_1_${samplerate}.wav" model_source=${STT_PROD_MODEL} model_name=$(basename "${model_source}") download_model_prod download_data node --version npm --version symlink_electron export_node_bin_path which electron which node if [ "${OS}" = "Linux" ]; then export DISPLAY=':99.0' sudo Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 & xvfb_process=$! fi node --version stt --version check_runtime_electronjs run_electronjs_prodtflite_inference_tests "${samplerate}" if [ "${OS}" = "Linux" ]; then sleep 1 sudo kill -9 ${xvfb_process} || true fi
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/m4/lt~obsolete.m4
# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software # Foundation, Inc. # Written by Scott James Remnant, 2004. # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 5 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. # # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. # Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], 
[AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])])
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/vocoder_tests/test_vocoder_parallel_wavegan_generator.py
import numpy as np import torch from TTS.vocoder.models.parallel_wavegan_generator import ParallelWaveganGenerator def test_pwgan_generator(): model = ParallelWaveganGenerator( in_channels=1, out_channels=1, kernel_size=3, num_res_blocks=30, stacks=3, res_channels=64, gate_channels=128, skip_channels=64, aux_channels=80, dropout=0.0, bias=True, use_weight_norm=True, upsample_factors=[4, 4, 4, 4], ) dummy_c = torch.rand((2, 80, 5)) output = model(dummy_c) assert np.all(output.shape == (2, 1, 5 * 256)), output.shape model.remove_weight_norm() output = model.inference(dummy_c) assert np.all(output.shape == (2, 1, (5 + 4) * 256))
0
coqui_public_repos
coqui_public_repos/open-bible-scripts/split_verse_lingala.py
# Imports import os, re import argparse from collections import defaultdict if __name__ == '__main__': parser = argparse.ArgumentParser(description="Run verse split pipeline") parser.add_argument("-wav_folder", "--path_to_wavs", default="data/lnOMNB20_MAT_wav/") parser.add_argument("-timing_folder", "--path_to_timings", default="data/lnOMNB20_timingfiles/timingfiles/MAT/") parser.add_argument("-book_sfm", "--path_to_book_sfm", default="data/lnOMNB20_USFM/41MATlnOMNB20.SFM") parser.add_argument("-output", "--output", default="data/MAT/") args = parser.parse_args() path_to_wavs = args.path_to_wavs path_to_timings = args.path_to_timings path_to_book_sfm = args.path_to_book_sfm output = args.output if not os.path.exists(f"{output}"): os.makedirs(f"{output}") dict_chap_verse = defaultdict(list) current_chap = None current_verse = None # Open the book SFM file and collect the text of every verse, keyed by chapter with open(f'{path_to_book_sfm}', 'r') as f: for textline in f: current_txt = textline.split() if len(current_txt) == 0: continue if current_txt[0] =='\\c': current_chap = current_txt[1] current_verse = None continue if current_txt[0] =='\\v': current_verse = current_txt[1] # TODO: Are we not missing some aspect of the language here? content = re.sub(r"[^a-zA-Z0-9?'’‘´`-]+", ' ', textline[len(current_txt[0]+current_txt[1])+2:]).strip() dict_chap_verse[current_chap].append(content) elif len(current_txt) == 1: continue elif current_chap and current_verse: content = re.sub(r"[^a-zA-Z0-9?'’‘´`-]+", ' ', textline[len(current_txt[0])+2:]).strip() dict_chap_verse[current_chap][int(current_verse)-1] += " " + content for file in os.listdir(path_to_wavs): book_chap, ext = file.split('.') if ext != 'wav': continue book, chap = book_chap.split('_') # Global dictionary mapping each verse to [time_start, time_end] dict_verse_time = defaultdict(list) # Open and read the timing file that matches this chapter's wav file with open(f'{path_to_timings}{book_chap}.txt', 'r') as f: for textline in f: verse_time = textline.split("\t") # This handles the file version case if len(verse_time) == 1 or len(verse_time[0].split()) == 1: continue else: # This skips the Chapter Title and Headings verse, number = verse_time[0].split() if verse != "Verse": continue else: timestamp = verse_time[1] dict_verse_time[f'{verse}_{number.zfill(3)}'].append(timestamp) # The start time of verse N is also the end time of verse N-1 if int(number) > 1: dict_verse_time[f'{verse}_{str(int(number)-1).zfill(3)}'].append(timestamp) for verse_key in dict_verse_time: audio = f"{path_to_wavs}{file}" output_file = f"{output}{book_chap}_{verse_key}.wav" if len(dict_verse_time[verse_key])==2: os.system(f"sox {audio} {output_file} trim {dict_verse_time[verse_key][0]} ={dict_verse_time[verse_key][1]}") else: os.system(f"sox {audio} {output_file} trim {dict_verse_time[verse_key][0]}") with open(f'{output}{book_chap}_{verse_key}.txt', "w", encoding="utf-8") as text_file: text_file.write(dict_chap_verse[str(int(chap))][int(verse_key.split('_')[1])-1]) text_file.write("\n")
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/extensions/compress/elias.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Compresses and decompresses unweighted FSTs. #ifndef FST_EXTENSIONS_COMPRESS_ELIAS_H_ #define FST_EXTENSIONS_COMPRESS_ELIAS_H_ #include <stack> #include <vector> #include <fst/compat.h> namespace fst { template <class Var> class Elias { public: // Gamma encoding is a subroutine for Delta encoding static void GammaEncode(const Var &input, std::vector<bool> *code); // Elias Delta encoding for a single integer static void DeltaEncode(const Var &input, std::vector<bool> *code); // Batch decoding of a set of integers static void BatchDecode(const std::vector<bool> &input, std::vector<Var> *output); }; template <class Var> void Elias<Var>::GammaEncode(const Var &input, std::vector<bool> *code) { Var input_copy = input; std::stack<bool> reverse_code; while (input_copy > 0) { reverse_code.push(input_copy % 2); input_copy = input_copy / 2; } for (Var auxvar = 0; auxvar < reverse_code.size() - 1; auxvar++) code->push_back(0); while (reverse_code.empty() != 1) { code->push_back(reverse_code.top()); reverse_code.pop(); } } template <class Var> void Elias<Var>::DeltaEncode(const Var &input, std::vector<bool> *code) { Var input_copy = input + 1; std::stack<bool> reverse_remainder; Var auxvar = 0; while (input_copy != 0) { reverse_remainder.push(input_copy % 2); input_copy = input_copy / 2; auxvar = auxvar + 1; } GammaEncode(auxvar, code); reverse_remainder.pop(); while (reverse_remainder.empty() != 1) { code->push_back(reverse_remainder.top()); reverse_remainder.pop(); } } template <class Var> void Elias<Var>::BatchDecode(const std::vector<bool> &input, std::vector<Var> *output) { Var lead_zeros = 0; Var remainder_bits = 0; Var current_word = 1; Var value = 1; std::vector<bool>::const_iterator it = input.begin(); while (it != input.end()) { lead_zeros = 0; remainder_bits = 0; current_word = 1; value = 1; while (*it != 1) { it++; lead_zeros++; } it++; while (lead_zeros > 0) { lead_zeros--; current_word = 2 * current_word + *it; it++; } current_word--; while (current_word > 0) { value = 2 * value + *it; current_word--; it++; } output->push_back(value - 1); } } } // namespace fst #endif // FST_EXTENSIONS_COMPRESS_ELIAS_H_
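The encode/decode pair above is easiest to verify with a round trip. The following is a minimal sketch of my own (not part of the library), assuming only that this header is reachable as <fst/extensions/compress/elias.h>: each DeltaEncode call appends one self-delimiting code to the bit vector, which is exactly the framing BatchDecode expects.

#include <cassert>
#include <cstdint>
#include <vector>

#include <fst/extensions/compress/elias.h>

int main() {
  const std::vector<uint32_t> inputs = {0, 3, 7, 1024};
  std::vector<bool> code;
  // Each call appends gamma(bit length) followed by the value's remainder bits.
  for (const auto v : inputs) fst::Elias<uint32_t>::DeltaEncode(v, &code);
  std::vector<uint32_t> outputs;
  fst::Elias<uint32_t>::BatchDecode(code, &outputs);
  assert(outputs == inputs);  // Every value survives the encode/decode cycle.
  return 0;
}

Note the off-by-one built into the scheme: DeltaEncode stores input + 1 so that zero is encodable, and BatchDecode subtracts it back out before pushing each decoded value.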
0
coqui_public_repos/inference-engine/third_party/kenlm/util
coqui_public_repos/inference-engine/third_party/kenlm/util/double-conversion/LICENSE
Copyright 2006-2011, the V8 project authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/kenlm_multistrap_arm64_buster.conf
[General] arch=arm64 noauth=true unpack=true debootstrap=Debian aptsources=Debian cleanup=true [Debian] packages=libc6 libc6-dev libstdc++-7-dev linux-libc-dev libboost-dev zlib1g-dev libbz2-dev liblzma-dev libboost-program-options-dev libboost-system-dev libboost-thread-dev libboost-test-dev source=http://deb.debian.org/debian keyring=debian-archive-keyring components=main suite=buster
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/tc-cpp_tflite-ds-tests-prod.sh
#!/bin/bash set -xe source $(dirname "$0")/tc-tests-utils.sh bitrate=$1 set_ldc_sample_filename "${bitrate}" model_source=${DEEPSPEECH_PROD_MODEL//.pb/.tflite} model_name=$(basename "${model_source}") model_name_mmap=$(basename "${model_source}") model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP//.pbmm/.tflite} export DATA_TMP_DIR=${TASKCLUSTER_TMP_DIR} download_material "${TASKCLUSTER_TMP_DIR}/ds" export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH check_versions run_prodtflite_inference_tests "${bitrate}"
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstsynchronize-main.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Synchronizes an FST. #include <cstring> #include <memory> #include <string> #include <fst/flags.h> #include <fst/script/synchronize.h> int fstsynchronize_main(int argc, char **argv) { namespace s = fst::script; using fst::script::FstClass; using fst::script::VectorFstClass; string usage = "Synchronizes an FST.\n\n Usage: "; usage += argv[0]; usage += " [in.fst [out.fst]]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 3) { ShowUsage(); return 1; } const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : ""; const string out_name = argc > 2 ? argv[2] : ""; std::unique_ptr<FstClass> ifst(FstClass::Read(in_name)); if (!ifst) return 1; VectorFstClass ofst(ifst->ArcType()); s::Synchronize(*ifst, &ofst); return !ofst.Write(out_name); }
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/weight.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // General weight set and associated semiring operation definitions. #ifndef FST_WEIGHT_H_ #define FST_WEIGHT_H_ #include <cctype> #include <cmath> #include <iostream> #include <sstream> #include <type_traits> #include <utility> #include <fst/compat.h> #include <fst/log.h> #include <fst/util.h> DECLARE_string(fst_weight_parentheses); DECLARE_string(fst_weight_separator); namespace fst { // A semiring is specified by two binary operations Plus and Times and two // designated elements Zero and One with the following properties: // // Plus: associative, commutative, and has Zero as its identity. // // Times: associative and has identity One, distributes w.r.t. Plus, and // has Zero as an annihilator: // Times(Zero(), a) == Times(a, Zero()) = Zero(). // // A left semiring distributes on the left; a right semiring is similarly // defined. // // A Weight class must have binary functions Plus and Times and static member // functions Zero() and One() and these must form (at least) a left or right // semiring. // // In addition, the following should be defined for a Weight: // // Member: predicate on set membership. // // NoWeight: static member function that returns an element that is // not a set member; used to signal an error. // // >>: reads textual representation of a weight. // // <<: prints textual representation of a weight. // // Read(istream &istrm): reads binary representation of a weight. // // Write(ostream &ostrm): writes binary representation of a weight. // // Hash: maps weight to size_t. // // ApproxEqual: approximate equality (for inexact weights) // // Quantize: quantizes w.r.t delta (for inexact weights) // // Divide: for all a, b, c s.t. Times(a, b) == c // // --> b' = Divide(c, a, DIVIDE_LEFT) if a left semiring, b'.Member() // and Times(a, b') == c // --> a' = Divide(c, b, DIVIDE_RIGHT) if a right semiring, a'.Member() // and Times(a', b) == c // --> b' = Divide(c, a) = Divide(c, a, DIVIDE_ANY) = // Divide(c, a, DIVIDE_LEFT) = Divide(c, a, DIVIDE_RIGHT) if a // commutative semiring, b'.Member() and Times(a, b') = Times(b', a) = c // // ReverseWeight: the type of the corresponding reverse weight. // // Typically the same type as Weight for a (both left and right) semiring. // For the left string semiring, it is the right string semiring. // // Reverse: a mapping from Weight to ReverseWeight s.t. // // --> Reverse(Reverse(a)) = a // --> Reverse(Plus(a, b)) = Plus(Reverse(a), Reverse(b)) // --> Reverse(Times(a, b)) = Times(Reverse(b), Reverse(a)) // Typically the identity mapping in a (both left and right) semiring. // In the left string semiring, it maps to the reverse string in the right // string semiring. // // Properties: specifies additional properties that hold: // LeftSemiring: indicates weights form a left semiring. // RightSemiring: indicates weights form a right semiring. // Commutative: for all a,b: Times(a,b) == Times(b,a) // Idempotent: for all a: Plus(a, a) == a. // Path: for all a, b: Plus(a, b) == a or Plus(a, b) == b. // CONSTANT DEFINITIONS // A representable float near .001. constexpr float kDelta = 1.0F / 1024.0F; // For all a, b, c: Times(c, Plus(a, b)) = Plus(Times(c, a), Times(c, b)). constexpr uint64_t kLeftSemiring = 0x0000000000000001ULL; // For all a, b, c: Times(Plus(a, b), c) = Plus(Times(a, c), Times(b, c)). 
constexpr uint64_t kRightSemiring = 0x0000000000000002ULL; constexpr uint64_t kSemiring = kLeftSemiring | kRightSemiring; // For all a, b: Times(a, b) = Times(b, a). constexpr uint64_t kCommutative = 0x0000000000000004ULL; // For all a: Plus(a, a) = a. constexpr uint64_t kIdempotent = 0x0000000000000008ULL; // For all a, b: Plus(a, b) = a or Plus(a, b) = b. constexpr uint64_t kPath = 0x0000000000000010ULL; // For random weight generation: default number of distinct weights. // This is also used for a few other weight generation defaults. constexpr size_t kNumRandomWeights = 5; // Weight property boolean constants needed for SFINAE. // MSVC compiler bug workaround: an expression containing W::Properties() cannot // be directly used as a value argument to std::enable_if or integral_constant. // WeightPropertiesThunk<W>::Properties works instead, however. namespace bug { template <class W> struct WeightPropertiesThunk { WeightPropertiesThunk() = delete; constexpr static const uint64_t Properties = W::Properties(); }; template <class W, uint64_t props> using TestWeightProperties = std::integral_constant<bool, (WeightPropertiesThunk<W>::Properties & props) == props>; } // namespace bug template <class W> using IsIdempotent = bug::TestWeightProperties<W, kIdempotent>; template <class W> using IsPath = bug::TestWeightProperties<W, kPath>; // Determines direction of division. enum DivideType { DIVIDE_LEFT, // left division DIVIDE_RIGHT, // right division DIVIDE_ANY }; // division in a commutative semiring // NATURAL ORDER // // By definition: // // a <= b iff a + b = a // // The natural order is a negative partial order iff the semiring is // idempotent. It is trivially monotonic for plus. It is left // (resp. right) monotonic for times iff the semiring is left // (resp. right) distributive. It is a total order iff the semiring // has the path property. // // For more information, see: // // Mohri, M. 2002. Semiring framework and algorithms for shortest-distance // problems, Journal of Automata, Languages and // Combinatorics 7(3): 321-350, 2002. // // We define the strict version of this order below. template <class W> class NaturalLess { public: using Weight = W; NaturalLess() { if (!(W::Properties() & kIdempotent)) { FSTERROR() << "NaturalLess: Weight type is not idempotent: " << W::Type(); } } bool operator()(const W &w1, const W &w2) const { return (Plus(w1, w2) == w1) && w1 != w2; } }; // Power is the iterated product for arbitrary semirings such that Power(w, 0) // is One() for the semiring, and Power(w, n) = Times(Power(w, n - 1), w). template <class Weight> Weight Power(const Weight &weight, size_t n) { auto result = Weight::One(); for (size_t i = 0; i < n; ++i) result = Times(result, weight); return result; } // Simple default adder class. Specializations might be more complex. template <class Weight> class Adder { public: explicit Adder(Weight w = Weight::Zero()) : sum_(w) { } Weight Add(const Weight &w) { sum_ = Plus(sum_, w); return sum_; } Weight Sum() { return sum_; } void Reset(Weight w = Weight::Zero()) { sum_ = w; } private: Weight sum_; }; // General weight converter: raises error. template <class W1, class W2> struct WeightConvert { W2 operator()(W1 w1) const { FSTERROR() << "WeightConvert: Can't convert weight from \"" << W1::Type() << "\" to \"" << W2::Type(); return W2::NoWeight(); } }; // Specialized weight converter to self. template <class W> struct WeightConvert<W, W> { W operator()(W weight) const { return weight; } }; // General random weight generator: raises error. 
template <class W> struct WeightGenerate { W operator()() const { FSTERROR() << "WeightGenerate: No random generator for " << W::Type(); return W::NoWeight(); } }; namespace internal { class CompositeWeightIO { public: CompositeWeightIO(); CompositeWeightIO(char separator, std::pair<char, char> parentheses); std::pair<char, char> parentheses() const { return {open_paren_, close_paren_}; } char separator() const { return separator_; } bool error() const { return error_; } protected: const char separator_; const char open_paren_; const char close_paren_; private: bool error_; }; } // namespace internal // Helper class for writing textual composite weights. class CompositeWeightWriter : public internal::CompositeWeightIO { public: // Uses configuration from flags (FLAGS_fst_weight_separator, // FLAGS_fst_weight_parentheses). explicit CompositeWeightWriter(std::ostream &ostrm); // parentheses defines the opening and closing parenthesis characters. // Set parentheses = {0, 0} to disable writing parenthesis. CompositeWeightWriter(std::ostream &ostrm, char separator, std::pair<char, char> parentheses); CompositeWeightWriter(const CompositeWeightWriter &) = delete; CompositeWeightWriter &operator=(const CompositeWeightWriter &) = delete; // Writes open parenthesis to a stream if option selected. void WriteBegin(); // Writes element to a stream. template <class T> void WriteElement(const T &comp) { if (i_++ > 0) ostrm_ << separator_; ostrm_ << comp; } // Writes close parenthesis to a stream if option selected. void WriteEnd(); private: std::ostream &ostrm_; int i_ = 0; // Element position. }; // Helper class for reading textual composite weights. Elements are separated by // a separator character. There must be at least one element per textual // representation. Parentheses characters should be set if the composite // weights themselves contain composite weights to ensure proper parsing. class CompositeWeightReader : public internal::CompositeWeightIO { public: // Uses configuration from flags (FLAGS_fst_weight_separator, // FLAGS_fst_weight_parentheses). explicit CompositeWeightReader(std::istream &istrm); // parentheses defines the opening and closing parenthesis characters. // Set parentheses = {0, 0} to disable reading parenthesis. CompositeWeightReader(std::istream &istrm, char separator, std::pair<char, char> parentheses); CompositeWeightReader(const CompositeWeightReader &) = delete; CompositeWeightReader &operator=(const CompositeWeightReader &) = delete; // Reads open parenthesis from a stream if option selected. void ReadBegin(); // Reads element from a stream. The second argument, when true, indicates that // this will be the last element (allowing more forgiving formatting of the // last element). Returns false when last element is read. template <class T> bool ReadElement(T *comp, bool last = false); // Finalizes reading. void ReadEnd(); private: std::istream &istrm_; // Input stream. int c_ = 0; // Last character read, or EOF. int depth_ = 0; // Weight parentheses depth. }; template <class T> inline bool CompositeWeightReader::ReadElement(T *comp, bool last) { string s; const bool has_parens = open_paren_ != 0; while ((c_ != std::istream::traits_type::eof()) && !std::isspace(c_) && (c_ != separator_ || depth_ > 1 || last) && (c_ != close_paren_ || depth_ != 1)) { s += c_; // If parentheses encountered before separator, they must be matched. if (has_parens && c_ == open_paren_) { ++depth_; } else if (has_parens && c_ == close_paren_) { // Failure on unmatched parentheses. 
if (depth_ == 0) { FSTERROR() << "CompositeWeightReader: Unmatched close paren: " << "Is the fst_weight_parentheses flag set correctly?"; istrm_.clear(std::ios::badbit); return false; } --depth_; } c_ = istrm_.get(); } if (s.empty()) { FSTERROR() << "CompositeWeightReader: Empty element: " << "Is the fst_weight_parentheses flag set correctly?"; istrm_.clear(std::ios::badbit); return false; } std::istringstream istrm(s); istrm >> *comp; // Skips separator/close parenthesis. if (c_ != std::istream::traits_type::eof() && !std::isspace(c_)) { c_ = istrm_.get(); } const bool is_eof = c_ == std::istream::traits_type::eof(); // Clears fail bit if just EOF. if (is_eof && !istrm_.bad()) istrm_.clear(std::ios::eofbit); return !is_eof && !std::isspace(c_); } } // namespace fst #endif // FST_WEIGHT_H_
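The semiring contract documented at the top of this header is concrete enough to check directly. Below is a small sketch of my own (not from the library's documentation) using the tropical semiring, where Plus is min and Times is arithmetic addition; it assumes the standard OpenFst headers are on the include path.

#include <cassert>

#include <fst/float-weight.h>

int main() {
  using fst::TropicalWeight;
  const TropicalWeight a(1.0), b(2.5);
  // Plus is min; Zero() (= +infinity) is its identity.
  assert(Plus(a, b) == a);
  assert(Plus(a, TropicalWeight::Zero()) == a);
  // Times is +; One() (= 0) is its identity and Zero() annihilates.
  assert(Times(a, b) == TropicalWeight(3.5));
  assert(Times(a, TropicalWeight::Zero()) == TropicalWeight::Zero());
  // Power is iterated Times, so Power(w, 3) is w + w + w here.
  assert(Power(TropicalWeight(2.0), 3) == TropicalWeight(6.0));
  // The natural order (a <= b iff Plus(a, b) == a) is total here because
  // the tropical semiring is idempotent and has the path property.
  const fst::NaturalLess<TropicalWeight> less;
  assert(less(a, b) && !less(b, a));
  return 0;
}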
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/lib/fst-types.cc
// Registration of common Fst and arc types. #include <fst/arc.h> #include <fst/compact-fst.h> #include <fst/const-fst.h> #include <fst/edit-fst.h> #include <fst/register.h> #include <fst/vector-fst.h> namespace fst { // Registers VectorFst, ConstFst and EditFst for common arcs types. REGISTER_FST(VectorFst, StdArc); REGISTER_FST(VectorFst, LogArc); REGISTER_FST(VectorFst, Log64Arc); REGISTER_FST(ConstFst, StdArc); REGISTER_FST(ConstFst, LogArc); REGISTER_FST(ConstFst, Log64Arc); REGISTER_FST(EditFst, StdArc); REGISTER_FST(EditFst, LogArc); REGISTER_FST(EditFst, Log64Arc); // Register CompactFst for common arcs with the default (uint32_t) size type REGISTER_FST(CompactStringFst, StdArc); REGISTER_FST(CompactStringFst, LogArc); REGISTER_FST(CompactWeightedStringFst, StdArc); REGISTER_FST(CompactWeightedStringFst, LogArc); REGISTER_FST(CompactAcceptorFst, StdArc); REGISTER_FST(CompactAcceptorFst, LogArc); REGISTER_FST(CompactUnweightedFst, StdArc); REGISTER_FST(CompactUnweightedFst, LogArc); REGISTER_FST(CompactUnweightedAcceptorFst, StdArc); REGISTER_FST(CompactUnweightedAcceptorFst, LogArc); } // namespace fst
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/common/eigen_common_wrapper.h
//----------------------------------------------------------------------------- // // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // //----------------------------------------------------------------------------- #pragma once #include "onnxruntime_config.h" // build/external/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h:162:71: // error: ignoring attributes on template argument "Eigen::PacketType<const float, Eigen::DefaultDevice>::type {aka __vector(4) float}" [-Werror=ignored-attributes] #if defined(__GNUC__) #pragma GCC diagnostic push #if __GNUC__ >= 6 #pragma GCC diagnostic ignored "-Wignored-attributes" #endif #pragma GCC diagnostic ignored "-Wunused-parameter" #ifdef HAS_DEPRECATED_COPY #pragma GCC diagnostic ignored "-Wdeprecated-copy" #endif #elif defined(_MSC_VER) // build\windows\debug\external\eigen3\unsupported\eigen\cxx11\src/Tensor/Tensor.h(76): // warning C4554: '&': check operator precedence for possible error; use parentheses to clarify precedence // unsupported\eigen\cxx11\src\Tensor\TensorUInt128.h(150,0): Warning C4245: 'initializing': conversion from '__int64' // to 'uint64_t', signed/unsigned mismatch #pragma warning(push) #pragma warning(disable : 4554) #pragma warning(disable : 4245) #pragma warning(disable : 4127) #endif #include "unsupported/Eigen/CXX11/Tensor" #if defined(__GNUC__) #pragma GCC diagnostic pop #elif defined(_MSC_VER) #pragma warning(pop) #endif
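The push/disable/include/pop pattern used in this wrapper generalizes to any warning-heavy third-party header. A hedged sketch of the same technique follows; the header name below is hypothetical, not part of this repository.

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#elif defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4127)  // Conditional expression is constant.
#endif

#include "some/noisy_header.h"  // Hypothetical third-party include.

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#pragma warning(pop)
#endif

The point of the push/pop bracketing is that the relaxed diagnostics apply only to the wrapped header, so the project's own code keeps compiling under the stricter warning settings.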
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/test-properties.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Functions to manipulate and test property bits. #ifndef FST_TEST_PROPERTIES_H_ #define FST_TEST_PROPERTIES_H_ #include <unordered_set> #include <fst/flags.h> #include <fst/log.h> #include <fst/connect.h> #include <fst/dfs-visit.h> DECLARE_bool(fst_verify_properties); namespace fst { // namespace internal { // For a binary property, the bit is always returned set. For a trinary (i.e., // two-bit) property, both bits are returned set iff either corresponding input // bit is set. inline uint64_t KnownProperties(uint64_t props) { return kBinaryProperties | (props & kTrinaryProperties) | ((props & kPosTrinaryProperties) << 1) | ((props & kNegTrinaryProperties) >> 1); } // Tests compatibility between two sets of properties. inline bool CompatProperties(uint64_t props1, uint64_t props2) { const auto known_props1 = KnownProperties(props1); const auto known_props2 = KnownProperties(props2); const auto known_props = known_props1 & known_props2; const auto incompat_props = (props1 & known_props) ^ (props2 & known_props); if (incompat_props) { uint64_t prop = 1; for (int i = 0; i < 64; ++i, prop <<= 1) { if (prop & incompat_props) { LOG(ERROR) << "CompatProperties: Mismatch: " << PropertyNames[i] << ": props1 = " << (props1 & prop ? "true" : "false") << ", props2 = " << (props2 & prop ? "true" : "false"); } } return false; } else { return true; } } // Computes FST property values defined in properties.h. The value of each // property indicated in the mask will be determined and returned (these will // never be unknown here). In the course of determining the properties // specifically requested in the mask, certain other properties may be // determined (those with little additional expense) and their values will be // returned as well. The complete set of known properties (whether true or // false) determined by this operation will be assigned to the value pointed // to by KNOWN. If 'use_stored' is true, pre-computed FST properties may be used // when possible. 'mask & required_mask' is used to determine whether the stored // properties can be used. This routine is seldom called directly; instead it is // used to implement fst.Properties(mask, true). template <class Arc> uint64_t ComputeProperties(const Fst<Arc> &fst, uint64_t mask, uint64_t *known, bool use_stored) { using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; const auto fst_props = fst.Properties(kFstProperties, false); // FST-stored. // Check stored FST properties first if allowed. if (use_stored) { const auto known_props = KnownProperties(fst_props); // If FST contains required info, return it. if ((known_props & mask) == mask) { if (known) *known = known_props; return fst_props; } } // Computes (trinary) properties explicitly. // Initialize with binary properties (already known). uint64_t comp_props = fst_props & kBinaryProperties; // Computes these trinary properties with a DFS. We compute only those that // need a DFS here, since we otherwise would like to avoid a DFS since its // stack could grow large. 
uint64_t dfs_props = kCyclic | kAcyclic | kInitialCyclic | kInitialAcyclic | kAccessible | kNotAccessible | kCoAccessible | kNotCoAccessible; std::vector<StateId> scc; if (mask & (dfs_props | kWeightedCycles | kUnweightedCycles)) { SccVisitor<Arc> scc_visitor(&scc, nullptr, nullptr, &comp_props); DfsVisit(fst, &scc_visitor); } // Computes any remaining trinary properties via a state and arcs iterations if (mask & ~(kBinaryProperties | dfs_props)) { comp_props |= kAcceptor | kNoEpsilons | kNoIEpsilons | kNoOEpsilons | kILabelSorted | kOLabelSorted | kUnweighted | kTopSorted | kString; if (mask & (kIDeterministic | kNonIDeterministic)) { comp_props |= kIDeterministic; } if (mask & (kODeterministic | kNonODeterministic)) { comp_props |= kODeterministic; } if (mask & (dfs_props | kWeightedCycles | kUnweightedCycles)) { comp_props |= kUnweightedCycles; } std::unique_ptr<std::unordered_set<Label>> ilabels; std::unique_ptr<std::unordered_set<Label>> olabels; StateId nfinal = 0; for (StateIterator<Fst<Arc>> siter(fst); !siter.Done(); siter.Next()) { StateId s = siter.Value(); Arc prev_arc; // Creates these only if we need to. if (mask & (kIDeterministic | kNonIDeterministic)) { ilabels.reset(new std::unordered_set<Label>()); } if (mask & (kODeterministic | kNonODeterministic)) { olabels.reset(new std::unordered_set<Label>()); } bool first_arc = true; for (ArcIterator<Fst<Arc>> aiter(fst, s); !aiter.Done(); aiter.Next()) { const auto &arc = aiter.Value(); if (ilabels && ilabels->find(arc.ilabel) != ilabels->end()) { comp_props |= kNonIDeterministic; comp_props &= ~kIDeterministic; } if (olabels && olabels->find(arc.olabel) != olabels->end()) { comp_props |= kNonODeterministic; comp_props &= ~kODeterministic; } if (arc.ilabel != arc.olabel) { comp_props |= kNotAcceptor; comp_props &= ~kAcceptor; } if (arc.ilabel == 0 && arc.olabel == 0) { comp_props |= kEpsilons; comp_props &= ~kNoEpsilons; } if (arc.ilabel == 0) { comp_props |= kIEpsilons; comp_props &= ~kNoIEpsilons; } if (arc.olabel == 0) { comp_props |= kOEpsilons; comp_props &= ~kNoOEpsilons; } if (!first_arc) { if (arc.ilabel < prev_arc.ilabel) { comp_props |= kNotILabelSorted; comp_props &= ~kILabelSorted; } if (arc.olabel < prev_arc.olabel) { comp_props |= kNotOLabelSorted; comp_props &= ~kOLabelSorted; } } if (arc.weight != Weight::One() && arc.weight != Weight::Zero()) { comp_props |= kWeighted; comp_props &= ~kUnweighted; if ((comp_props & kUnweightedCycles) && scc[s] == scc[arc.nextstate]) { comp_props |= kWeightedCycles; comp_props &= ~kUnweightedCycles; } } if (arc.nextstate <= s) { comp_props |= kNotTopSorted; comp_props &= ~kTopSorted; } if (arc.nextstate != s + 1) { comp_props |= kNotString; comp_props &= ~kString; } prev_arc = arc; first_arc = false; if (ilabels) ilabels->insert(arc.ilabel); if (olabels) olabels->insert(arc.olabel); } if (nfinal > 0) { // Final state not last. comp_props |= kNotString; comp_props &= ~kString; } const auto final_weight = fst.Final(s); if (final_weight != Weight::Zero()) { // Final state. if (final_weight != Weight::One()) { comp_props |= kWeighted; comp_props &= ~kUnweighted; } ++nfinal; } else { // Non-final state. 
if (fst.NumArcs(s) != 1) { comp_props |= kNotString; comp_props &= ~kString; } } } if (fst.Start() != kNoStateId && fst.Start() != 0) { comp_props |= kNotString; comp_props &= ~kString; } } if (known) *known = KnownProperties(comp_props); return comp_props; } // This is a wrapper around ComputeProperties that will cause a fatal error if // the stored properties and the computed properties are incompatible when // FLAGS_fst_verify_properties is true. This routine is seldom called directly; // instead it is used to implement fst.Properties(mask, true). template <class Arc> uint64_t TestProperties(const Fst<Arc> &fst, uint64_t mask, uint64_t *known) { if (FLAGS_fst_verify_properties) { const auto stored_props = fst.Properties(kFstProperties, false); const auto computed_props = ComputeProperties(fst, mask, known, false); if (!CompatProperties(stored_props, computed_props)) { FSTERROR() << "TestProperties: stored FST properties incorrect" << " (stored: props1, computed: props2)"; } return computed_props; } else { return ComputeProperties(fst, mask, known, true); } } // If all the properties of 'fst' corresponding to 'check_mask' are known, // returns the stored properties. Otherwise, the properties corresponding to // both 'check_mask' and 'test_mask' are computed. This is used to check for // newly-added properties that might not be set in old binary files. template <class Arc> uint64_t CheckProperties(const Fst<Arc> &fst, uint64_t check_mask, uint64_t test_mask) { auto props = fst.Properties(kFstProperties, false); if (FLAGS_fst_verify_properties) { props = TestProperties(fst, check_mask | test_mask, nullptr); } else if ((KnownProperties(props) & check_mask) != check_mask) { props = ComputeProperties(fst, check_mask | test_mask, nullptr, false); } return props & (check_mask | test_mask); } //} // namespace internal } // namespace fst #endif // FST_TEST_PROPERTIES_H_
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/far/farlib.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // A finite-state archive (FAR) is used to store an indexable collection of // FSTs in a single file. Utilities are provided to create FARs from FSTs, // to iterate over FARs, and to extract specific FSTs from FARs. #ifndef FST_EXTENSIONS_FAR_FARLIB_H_ #define FST_EXTENSIONS_FAR_FARLIB_H_ #include <fst/extensions/far/compile-strings.h> #include <fst/extensions/far/create.h> #include <fst/extensions/far/extract.h> #include <fst/extensions/far/far.h> #include <fst/extensions/far/getters.h> #include <fst/extensions/far/info.h> #include <fst/extensions/far/print-strings.h> #endif // FST_EXTENSIONS_FAR_FARLIB_H_
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-nodejs_12x_16k-linux-amd64-prod_pbmodel-opt.yml
build: template_file: test-linux-opt-base.tyml docker_image: "ubuntu:16.04" dependencies: - "linux-amd64-cpu-opt" system_setup: > ${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages_xenial.apt} args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-node-tests-prod.sh 12.x 16k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 CPU NodeJS 12.x prod tests (16kHz)" description: "Testing DeepSpeech for Linux/AMD64 on NodeJS v12.x on prod model, CPU only, optimized version (16kHz)"
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/object_pool/object_pool.h
#ifndef GODEFV_MEMORY_OBJECT_POOL_H
#define GODEFV_MEMORY_OBJECT_POOL_H

#include "unique_ptr.h"
#include <memory>
#include <vector>
#include <array>

namespace godefv{

// Forward declaration
template<class Object, template<class T> class Allocator = std::allocator, std::size_t ChunkSize = 1024>
class object_pool_t;

//! Custom deleter to recycle the deleted pointers of the object_pool_t.
template<class Object, template<class T> class Allocator = std::allocator, std::size_t ChunkSize = 1024>
struct object_pool_deleter_t{
private:
	object_pool_t<Object, Allocator, ChunkSize>* object_pool_ptr;
public:
	explicit object_pool_deleter_t(decltype(object_pool_ptr) input_object_pool_ptr) :
		object_pool_ptr(input_object_pool_ptr)
	{}
	void operator()(Object* object_ptr)
	{
		object_pool_ptr->delete_object(object_ptr);
	}
};

//! Allocates instances of Object efficiently (constant time and log((maximum number of Objects used at the same time)/ChunkSize) calls to malloc in the whole lifetime of the object pool).
//! When an instance returned by the object pool is destroyed, its allocated memory is recycled by the object pool. Defragmenting the object pool to free memory is not possible.
template<class Object, template<class T> class Allocator, std::size_t ChunkSize>
class object_pool_t{
	//! An object slot is an uninitialized memory space of the same size as Object.
	//! It is initially "free". It can then be "used" to construct an Object in place and the pointer to it is returned by the object pool. When the pointer is destroyed, the object slot is "recycled" and can be used again but it is not "free" anymore because "free" object slots are contiguous in memory.
	using object_slot_t=std::array<char, sizeof(Object)>;

	//! To minimize calls to malloc, the object slots are allocated in chunks.
	//! For example, if ChunkSize=8, a chunk may look like this : |used|recycled|used|used|recycled|free|free|free|. In this example, if more than 5 new Object are now asked from the object pool, at least one new chunk of 8 object slots will be allocated.
	using chunk_t=std::array<object_slot_t, ChunkSize>;
	Allocator<chunk_t> chunk_allocator; //!< This allocator can be used to have aligned memory if required.
	std::vector<unique_ptr_t<chunk_t, decltype(chunk_allocator)>> memory_chunks;

	//! Recycled object slots are tracked using a stack of pointers to them. When an object slot is recycled, a pointer to it is pushed in constant time. When a new object is constructed, a recycled object slot can be found and popped in constant time.
	std::vector<object_slot_t*> recycled_object_slots;

	object_slot_t* free_object_slots_begin;
	object_slot_t* free_object_slots_end;

	//! When a pointer provided by the ObjectPool is deleted, its memory is converted to an object slot to be recycled.
	void delete_object(Object* object_ptr){
		object_ptr->~Object();
		recycled_object_slots.push_back(reinterpret_cast<object_slot_t*>(object_ptr));
	}

	friend object_pool_deleter_t<Object, Allocator, ChunkSize>;

public:
	using object_t = Object;
	using deleter_t = object_pool_deleter_t<Object, Allocator, ChunkSize>;
	using object_unique_ptr_t = std::unique_ptr<object_t, deleter_t>; //!< The type returned by the object pool.

	object_pool_t(Allocator<chunk_t> const& allocator = Allocator<chunk_t>{}) :
		chunk_allocator{ allocator },
		free_object_slots_begin{ nullptr },
		free_object_slots_end{ nullptr } // At the beginning, set the 2 pointers to the same (null) value to simulate a full pool; reading one from the other would be undefined behaviour.
	{}

	//! Returns a unique pointer to an object_t using an unused object slot from the object pool.
	template<class... 
Args> object_unique_ptr_t make_unique(Args&&... vars){ auto construct_object_unique_ptr=[&](object_slot_t* object_slot){ return object_unique_ptr_t{ new (reinterpret_cast<object_t*>(object_slot)) object_t{ std::forward<Args>(vars)... } , deleter_t{ this } }; }; // If a recycled object slot is available, use it. if (!recycled_object_slots.empty()) { auto object_slot = recycled_object_slots.back(); recycled_object_slots.pop_back(); return construct_object_unique_ptr(object_slot); } // If the pool is full: add a new chunk. if (free_object_slots_begin == free_object_slots_end) { memory_chunks.emplace_back(chunk_allocator); auto& new_chunk = memory_chunks.back(); free_object_slots_begin=new_chunk->data(); free_object_slots_end =free_object_slots_begin+new_chunk->size(); } // We know that there is now at least one free object slot, use it. return construct_object_unique_ptr(free_object_slots_begin++); } //! Returns the total number of object slots (free, recycled, or used). std::size_t capacity() const{ return memory_chunks.size()*ChunkSize; } //! Returns the number of currently used object slots. std::size_t size() const{ return capacity() - static_cast<std::size_t>(free_object_slots_end-free_object_slots_begin) - recycled_object_slots.size(); } }; } /* namespace godefv */ #endif /* GODEFV_MEMORY_OBJECT_POOL_H */
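A short usage sketch, assuming only the pool API above (widget_t is a hypothetical aggregate used for illustration):

// Sketch: the pointer returned by make_unique() carries a custom deleter,
// so letting it go out of scope recycles the slot instead of freeing memory.
#include <string>

struct widget_t {
  int id;
  std::string name;
};

void object_pool_usage_sketch() {
  godefv::object_pool_t<widget_t> pool;
  {
    auto w = pool.make_unique(42, std::string{"demo"});
    // pool.size() == 1; pool.capacity() == 1024 (one default-sized chunk).
  }
  // w was destroyed above: its slot is now on the recycled stack.
  auto w2 = pool.make_unique(7, std::string{"reuse"});  // reuses that slot
  (void)w2;
}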
0
coqui_public_repos/STT
coqui_public_repos/STT/doc/Makefile
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = "Coqui STT" SOURCEDIR = . BUILDDIR = .build PIP_INSTALL ?= python -m pip install # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help python-reqs python-reqs: requirements.txt $(PIP_INSTALL) -r requirements.txt submodule: git submodule update --init --remote -- examples || true # Add submodule update dependency to Sphinx's "html" target html: Makefile submodule python-reqs PATH=$$HOME/.local/bin:`pwd`/../node_modules/.bin/:$$PATH \ $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -v dist: html cd $(BUILDDIR)/html/ && zip -r9 ../../html.zip *
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/vocoder_tests/test_vocoder_losses.py
import os import torch from tests import get_tests_input_path, get_tests_output_path, get_tests_path from TTS.config import BaseAudioConfig from TTS.utils.audio import AudioProcessor from TTS.utils.audio.numpy_transforms import stft from TTS.vocoder.layers.losses import MelganFeatureLoss, MultiScaleSTFTLoss, STFTLoss, TorchSTFT TESTS_PATH = get_tests_path() OUT_PATH = os.path.join(get_tests_output_path(), "audio_tests") os.makedirs(OUT_PATH, exist_ok=True) WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") ap = AudioProcessor(**BaseAudioConfig().to_dict()) def test_torch_stft(): torch_stft = TorchSTFT(ap.fft_size, ap.hop_length, ap.win_length) # librosa stft wav = ap.load_wav(WAV_FILE) M_librosa = abs(stft(y=wav, fft_size=ap.fft_size, hop_length=ap.hop_length, win_length=ap.win_length)) # torch stft wav = torch.from_numpy(wav[None, :]).float() M_torch = torch_stft(wav) # check the difference b/w librosa and torch outputs assert (M_librosa - M_torch[0].data.numpy()).max() < 1e-5 def test_stft_loss(): stft_loss = STFTLoss(ap.fft_size, ap.hop_length, ap.win_length) wav = ap.load_wav(WAV_FILE) wav = torch.from_numpy(wav[None, :]).float() loss_m, loss_sc = stft_loss(wav, wav) assert loss_m + loss_sc == 0 loss_m, loss_sc = stft_loss(wav, torch.rand_like(wav)) assert loss_sc < 1.0 assert loss_m + loss_sc > 0 def test_multiscale_stft_loss(): stft_loss = MultiScaleSTFTLoss( [ap.fft_size // 2, ap.fft_size, ap.fft_size * 2], [ap.hop_length // 2, ap.hop_length, ap.hop_length * 2], [ap.win_length // 2, ap.win_length, ap.win_length * 2], ) wav = ap.load_wav(WAV_FILE) wav = torch.from_numpy(wav[None, :]).float() loss_m, loss_sc = stft_loss(wav, wav) assert loss_m + loss_sc == 0 loss_m, loss_sc = stft_loss(wav, torch.rand_like(wav)) assert loss_sc < 1.0 assert loss_m + loss_sc > 0 def test_melgan_feature_loss(): feats_real = [] feats_fake = [] # if all the features are different. for _ in range(5): # different scales scale_feats_real = [] scale_feats_fake = [] for _ in range(4): # different layers scale_feats_real.append(torch.rand([3, 5, 7])) scale_feats_fake.append(torch.rand([3, 5, 7])) feats_real.append(scale_feats_real) feats_fake.append(scale_feats_fake) loss_func = MelganFeatureLoss() loss = loss_func(feats_fake, feats_real) assert loss.item() <= 1.0 feats_real = [] feats_fake = [] # if all the features are the same for _ in range(5): # different scales scale_feats_real = [] scale_feats_fake = [] for _ in range(4): # different layers tensor = torch.rand([3, 5, 7]) scale_feats_real.append(tensor) scale_feats_fake.append(tensor) feats_real.append(scale_feats_real) feats_fake.append(scale_feats_fake) loss_func = MelganFeatureLoss() loss = loss_func(feats_fake, feats_real) assert loss.item() == 0
0
coqui_public_repos/STT-examples/python_websocket_server/helm/stt_server
coqui_public_repos/STT-examples/python_websocket_server/helm/stt_server/overrides/values.prod.yaml
# Default values for stt-server. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 4 image: repository: "<docker_repo_path>" pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: "<image_tag>" imagePullSecrets: [] nameOverride: "" fullnameOverride: "" podAnnotations: {} podSecurityContext: {} # fsGroup: 2000 securityContext: readOnlyRootFilesystem: false runAsNonRoot: true runAsUser: 1000 service: type: ClusterIP port: 8080 ingress: enabled: true annotations: nginx.ingress.kubernetes.io/enable-cors: "true" nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS" nginx.ingress.kubernetes.io/cors-allow-origin: "*" nginx.ingress.kubernetes.io/cors-allow-credentials: "true" nginx.ingress.kubernetes.io/rewrite-target: / nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" nginx.ingress.kubernetes.io/proxy-body-size: 20m hosts: - host: "<host>" paths: ["/stt-server"] tls: - secretName: "<secret_name>" hosts: - "<host>" resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi autoscaling: enabled: false minReplicas: 1 maxReplicas: 100 targetCPUUtilizationPercentage: 80 nodeSelector: {} tolerations: [] affinity: {}
0
coqui_public_repos
coqui_public_repos/STT/MANIFEST.in
include training/coqui_stt_training/VERSION include training/coqui_stt_training/GRAPH_VERSION
0
coqui_public_repos/STT-examples/uwp
coqui_public_repos/STT-examples/uwp/STTUWP/Package.appxmanifest
<?xml version="1.0" encoding="utf-8"?> <Package xmlns="http://schemas.microsoft.com/appx/manifest/foundation/windows10" xmlns:mp="http://schemas.microsoft.com/appx/2014/phone/manifest" xmlns:uap="http://schemas.microsoft.com/appx/manifest/uap/windows10" IgnorableNamespaces="uap mp"> <Identity Name="a79d1931-db08-441d-b5ce-1c9cf6b1c8ff" Publisher="CN=erikz" Version="1.0.0.0" /> <mp:PhoneIdentity PhoneProductId="a79d1931-db08-441d-b5ce-1c9cf6b1c8ff" PhonePublisherId="00000000-0000-0000-0000-000000000000"/> <Properties> <DisplayName>STTUWP</DisplayName> <PublisherDisplayName>erikz</PublisherDisplayName> <Logo>Assets\StoreLogo.png</Logo> </Properties> <Dependencies> <TargetDeviceFamily Name="Windows.Universal" MinVersion="10.0.0.0" MaxVersionTested="10.0.0.0" /> </Dependencies> <Resources> <Resource Language="x-generate"/> </Resources> <Applications> <Application Id="App" Executable="$targetnametoken$.exe" EntryPoint="STTUWP.App"> <uap:VisualElements DisplayName="STTUWP" Square150x150Logo="Assets\Square150x150Logo.png" Square44x44Logo="Assets\Square44x44Logo.png" Description="STTUWP" BackgroundColor="transparent"> <uap:DefaultTile Wide310x150Logo="Assets\Wide310x150Logo.png"/> <uap:SplashScreen Image="Assets\SplashScreen.png" /> </uap:VisualElements> </Application> </Applications> <Capabilities> <Capability Name="internetClient" /> <DeviceCapability Name="microphone"/> </Capabilities> </Package>
0
coqui_public_repos/STT
coqui_public_repos/STT/native_client/multistrap_armbian64_bullseye.conf
[General] arch=arm64 noauth=false unpack=true bootstrap=Debian aptsources=Debian cleanup=true [Debian] packages=apt libc6 libc6-dev libstdc++-9-dev linux-libc-dev libffi-dev libpython3.9-dev libsox-dev python3-numpy python3-setuptools source=http://deb.debian.org/debian keyring=debian-archive-keyring components=main suite=bullseye
0
coqui_public_repos/STT/native_client/java/app/src/main/res
coqui_public_repos/STT/native_client/java/app/src/main/res/values/colors.xml
<?xml version="1.0" encoding="utf-8"?> <resources> <color name="colorPrimary">#008577</color> <color name="colorPrimaryDark">#00574B</color> <color name="colorAccent">#D81B60</color> </resources>
0
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core
coqui_public_repos/inference-engine/third_party/onnxruntime/include/onnxruntime/core/common/status.h
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Modifications Copyright (c) Microsoft. #pragma once #include <memory> #include <ostream> #include <string> #ifdef _WIN32 #include <winerror.h> #endif namespace onnxruntime { namespace common { enum StatusCategory { NONE = 0, SYSTEM = 1, ONNXRUNTIME = 2, }; /** Error code for ONNXRuntime. */ enum StatusCode { OK = 0, FAIL = 1, INVALID_ARGUMENT = 2, NO_SUCHFILE = 3, NO_MODEL = 4, ENGINE_ERROR = 5, RUNTIME_EXCEPTION = 6, INVALID_PROTOBUF = 7, MODEL_LOADED = 8, NOT_IMPLEMENTED = 9, INVALID_GRAPH = 10, EP_FAIL = 11 }; inline const char* StatusCodeToString(StatusCode status) noexcept { switch (status) { case StatusCode::OK: return "SUCCESS"; case StatusCode::FAIL: return "FAIL"; case StatusCode::INVALID_ARGUMENT: return "INVALID_ARGUMENT"; case StatusCode::NO_SUCHFILE: return "NO_SUCHFILE"; case StatusCode::NO_MODEL: return "NO_MODEL"; case StatusCode::ENGINE_ERROR: return "ENGINE_ERROR"; case StatusCode::RUNTIME_EXCEPTION: return "RUNTIME_EXCEPTION"; case StatusCode::INVALID_PROTOBUF: return "INVALID_PROTOBUF"; case StatusCode::MODEL_LOADED: return "MODEL_LOADED"; case StatusCode::NOT_IMPLEMENTED: return "NOT_IMPLEMENTED"; case StatusCode::INVALID_GRAPH: return "INVALID_GRAPH"; case StatusCode::EP_FAIL: return "EP_FAIL"; default: return "GENERAL ERROR"; } } #ifdef _WIN32 inline HRESULT StatusCodeToHRESULT(StatusCode status) noexcept { switch (status) { case StatusCode::OK: return S_OK; case StatusCode::FAIL: return E_FAIL; case StatusCode::INVALID_ARGUMENT: return E_INVALIDARG; case StatusCode::NO_SUCHFILE: return __HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND); case StatusCode::NO_MODEL: return __HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND); case StatusCode::ENGINE_ERROR: return E_FAIL; case StatusCode::RUNTIME_EXCEPTION: return E_FAIL; case StatusCode::INVALID_PROTOBUF: return __HRESULT_FROM_WIN32(ERROR_FILE_CORRUPT); case StatusCode::MODEL_LOADED: return __HRESULT_FROM_WIN32(ERROR_INTERNAL_ERROR); case StatusCode::NOT_IMPLEMENTED: return E_NOTIMPL; case StatusCode::INVALID_GRAPH: return __HRESULT_FROM_WIN32(ERROR_FILE_CORRUPT); case StatusCode::EP_FAIL: return __HRESULT_FROM_WIN32(ERROR_INTERNAL_ERROR); default: return E_FAIL; } } #endif class Status { public: Status() noexcept = default; Status(StatusCategory category, int code, const std::string& msg); Status(StatusCategory category, int code, const char* msg); Status(StatusCategory category, int code); Status(const Status& other) : state_((other.state_ == nullptr) ? 
nullptr : new State(*other.state_)) {} Status& operator=(const Status& other) { if (state_ != other.state_) { if (other.state_ == nullptr) { state_.reset(); } else { state_.reset(new State(*other.state_)); } } return *this; } Status(Status&&) = default; Status& operator=(Status&&) = default; ~Status() = default; bool IsOK() const { return (state_ == nullptr); } int Code() const noexcept; StatusCategory Category() const noexcept; const std::string& ErrorMessage() const noexcept; std::string ToString() const; bool operator==(const Status& other) const { return (this->state_ == other.state_) || (ToString() == other.ToString()); } bool operator!=(const Status& other) const { return !(*this == other); } static Status OK() { return Status(); } private: static const std::string& EmptyString() noexcept; struct State { State(StatusCategory cat0, int code0, const std::string& msg0) : category(cat0), code(code0), msg(msg0) {} State(StatusCategory cat0, int code0, const char* msg0) : category(cat0), code(code0), msg(msg0) {} const StatusCategory category; const int code; const std::string msg; }; // As long as Code() is OK, state_ == nullptr. std::unique_ptr<State> state_; }; inline std::ostream& operator<<(std::ostream& out, const Status& status) { return out << status.ToString(); } } // namespace common } // namespace onnxruntime
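A hedged sketch of the usual producer/consumer pattern for this Status class (CheckPositive is a hypothetical function; only the members declared above are used):

// Sketch: returning and consuming a Status.
#include <iostream>

onnxruntime::common::Status CheckPositive(int v) {
  using namespace onnxruntime::common;
  if (v <= 0) {
    return Status(ONNXRUNTIME, INVALID_ARGUMENT, "value must be positive");
  }
  return Status::OK();  // the OK state is represented by a null state_
}

void StatusUsageSketch() {
  const auto status = CheckPositive(-1);
  if (!status.IsOK()) {
    std::cerr << status << std::endl;  // operator<< prints ToString()
  }
}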
0
coqui_public_repos
coqui_public_repos/stt-model-manager/setup.py
import setuptools setuptools.setup()
0
coqui_public_repos/inference-engine/src
coqui_public_repos/inference-engine/src/ctcdecode/ctc_beam_search_decoder.cpp
#include "ctc_beam_search_decoder.h" #include <algorithm> #include <cmath> #include <iostream> #include <limits> #include <unordered_map> #include <utility> #include "decoder_utils.h" #include "ThreadPool.h" #include "fst/fstlib.h" #include "path_trie.h" int DecoderState::init(const Alphabet& alphabet, size_t beam_size, double cutoff_prob, size_t cutoff_top_n, std::shared_ptr<Scorer> ext_scorer, std::unordered_map<std::string, float> hot_words) { // assign special ids abs_time_step_ = 0; space_id_ = alphabet.GetSpaceLabel(); blank_id_ = alphabet.GetSize(); beam_size_ = beam_size; cutoff_prob_ = cutoff_prob; cutoff_top_n_ = cutoff_top_n; ext_scorer_ = ext_scorer; hot_words_ = hot_words; start_expanding_ = false; // init prefixes' root PathTrie *root = new PathTrie; root->score = root->log_prob_b_prev = 0.0; prefix_root_.reset(root); prefix_root_->timesteps = &timestep_tree_root_; prefixes_.push_back(root); if (ext_scorer && (bool)(ext_scorer_->dictionary)) { // no need for std::make_shared<>() since Copy() does 'new' behind the doors auto dict_ptr = std::shared_ptr<PathTrie::FstType>(ext_scorer->dictionary->Copy(true)); root->set_dictionary(dict_ptr); auto matcher = std::make_shared<fst::SortedMatcher<PathTrie::FstType>>(*dict_ptr, fst::MATCH_INPUT); root->set_matcher(matcher); } return 0; } void DecoderState::next(const double *probs, int time_dim, int class_dim) { // prefix search over time for (size_t rel_time_step = 0; rel_time_step < time_dim; ++rel_time_step, ++abs_time_step_) { auto *prob = &probs[rel_time_step*class_dim]; // At the start of the decoding process, we delay beam expansion so that // timings on the first letters is not incorrect. As soon as we see a // timestep with blank probability lower than 0.999, we start expanding // beams. if (prob[blank_id_] < 0.999) { start_expanding_ = true; } // If not expanding yet, just continue to next timestep. 
if (!start_expanding_) { continue; } float min_cutoff = -NUM_FLT_INF; bool full_beam = false; if (ext_scorer_) { size_t num_prefixes = std::min(prefixes_.size(), beam_size_); std::partial_sort(prefixes_.begin(), prefixes_.begin() + num_prefixes, prefixes_.end(), prefix_compare); min_cutoff = prefixes_[num_prefixes - 1]->score + std::log(prob[blank_id_]) - std::max(0.0, ext_scorer_->beta); full_beam = (num_prefixes == beam_size_); } std::vector<std::pair<size_t, float>> log_prob_idx = get_pruned_log_probs(prob, class_dim, cutoff_prob_, cutoff_top_n_); // loop over class dim for (size_t index = 0; index < log_prob_idx.size(); index++) { auto c = log_prob_idx[index].first; auto log_prob_c = log_prob_idx[index].second; for (size_t i = 0; i < prefixes_.size() && i < beam_size_; ++i) { auto prefix = prefixes_[i]; if (full_beam && log_prob_c + prefix->score < min_cutoff) { break; } if (prefix->score == -NUM_FLT_INF) { continue; } assert(prefix->timesteps != nullptr); // blank if (c == blank_id_) { // compute probability of current path float log_p = log_prob_c + prefix->score; // combine current path with previous ones with the same prefix // the blank label comes last, so we can compare log_prob_nb_cur with log_p if (prefix->log_prob_nb_cur < log_p) { // keep current timesteps prefix->previous_timesteps = nullptr; } prefix->log_prob_b_cur = log_sum_exp(prefix->log_prob_b_cur, log_p); continue; } // repeated character if (c == prefix->character) { // compute probability of current path float log_p = log_prob_c + prefix->log_prob_nb_prev; // combine current path with previous ones with the same prefix if (prefix->log_prob_nb_cur < log_p) { // keep current timesteps prefix->previous_timesteps = nullptr; } prefix->log_prob_nb_cur = log_sum_exp( prefix->log_prob_nb_cur, log_p); } // get new prefix auto prefix_new = prefix->get_path_trie(c, log_prob_c); if (prefix_new != nullptr) { // compute probability of current path float log_p = -NUM_FLT_INF; if (c == prefix->character && prefix->log_prob_b_prev > -NUM_FLT_INF) { log_p = log_prob_c + prefix->log_prob_b_prev; } else if (c != prefix->character) { log_p = log_prob_c + prefix->score; } if (ext_scorer_) { // skip scoring the space in word based LMs PathTrie* prefix_to_score; if (ext_scorer_->is_utf8_mode()) { prefix_to_score = prefix_new; } else { prefix_to_score = prefix; } // language model scoring if (ext_scorer_->is_scoring_boundary(prefix_to_score, c)) { float score = 0.0; std::vector<std::string> ngram; ngram = ext_scorer_->make_ngram(prefix_to_score); float hot_boost = 0.0; if (!hot_words_.empty()) { std::unordered_map<std::string, float>::iterator iter; // increase prob of prefix for every word // that matches a word in the hot-words list for (std::string word : ngram) { iter = hot_words_.find(word); if ( iter != hot_words_.end() ) { // increase the log_cond_prob(prefix|LM) hot_boost += iter->second; } } } bool bos = ngram.size() < ext_scorer_->get_max_order(); score = ( ext_scorer_->get_log_cond_prob(ngram, bos) + hot_boost ) * ext_scorer_->alpha; log_p += score; log_p += ext_scorer_->beta; } } // combine current path with previous ones with the same prefix if (prefix_new->log_prob_nb_cur < log_p) { // record data needed to update timesteps // the actual update will be done if nothing better is found prefix_new->previous_timesteps = prefix->timesteps; prefix_new->new_timestep = abs_time_step_; } prefix_new->log_prob_nb_cur = log_sum_exp(prefix_new->log_prob_nb_cur, log_p); } } // end of loop over prefix } // end of loop over alphabet // 
update log probs prefixes_.clear(); prefix_root_->iterate_to_vec(prefixes_); // only preserve top beam_size prefixes if (prefixes_.size() > beam_size_) { std::nth_element(prefixes_.begin(), prefixes_.begin() + beam_size_, prefixes_.end(), prefix_compare); for (size_t i = beam_size_; i < prefixes_.size(); ++i) { prefixes_[i]->remove(); } // Remove the elements from std::vector prefixes_.resize(beam_size_); } } // end of loop over time } std::vector<Output> DecoderState::decode(size_t num_results) const { std::vector<PathTrie*> prefixes_copy = prefixes_; std::unordered_map<const PathTrie*, float> scores; for (PathTrie* prefix : prefixes_copy) { scores[prefix] = prefix->score; } // score the last word of each prefix that doesn't end with space if (ext_scorer_) { for (size_t i = 0; i < beam_size_ && i < prefixes_copy.size(); ++i) { PathTrie* prefix = prefixes_copy[i]; PathTrie* prefix_boundary = ext_scorer_->is_utf8_mode() ? prefix : prefix->parent; if (prefix_boundary && !ext_scorer_->is_scoring_boundary(prefix_boundary, prefix->character)) { float score = 0.0; std::vector<std::string> ngram = ext_scorer_->make_ngram(prefix); bool bos = ngram.size() < ext_scorer_->get_max_order(); score = ext_scorer_->get_log_cond_prob(ngram, bos) * ext_scorer_->alpha; score += ext_scorer_->beta; scores[prefix] += score; } } } using namespace std::placeholders; size_t num_returned = std::min(prefixes_copy.size(), num_results); std::partial_sort(prefixes_copy.begin(), prefixes_copy.begin() + num_returned, prefixes_copy.end(), std::bind(prefix_compare_external, _1, _2, scores)); std::vector<Output> outputs; outputs.reserve(num_returned); for (size_t i = 0; i < num_returned; ++i) { Output output; prefixes_copy[i]->get_path_vec(output.tokens); output.timesteps = get_history(prefixes_copy[i]->timesteps, &timestep_tree_root_); assert(output.tokens.size() == output.timesteps.size()); output.confidence = scores[prefixes_copy[i]]; outputs.push_back(output); } return outputs; } std::vector<Output> ctc_beam_search_decoder( const double *probs, int time_dim, int class_dim, const Alphabet &alphabet, size_t beam_size, double cutoff_prob, size_t cutoff_top_n, std::shared_ptr<Scorer> ext_scorer, std::unordered_map<std::string, float> hot_words, size_t num_results) { VALID_CHECK_EQ(alphabet.GetSize()+1, class_dim, "Number of output classes in acoustic model does not match number of labels in the alphabet file. 
Alphabet file must be the same one that was used to train the acoustic model.");

  DecoderState state;
  state.init(alphabet, beam_size, cutoff_prob, cutoff_top_n, ext_scorer, hot_words);
  state.next(probs, time_dim, class_dim);
  return state.decode(num_results);
}

std::vector<std::vector<Output>>
ctc_beam_search_decoder_batch(
    const double *probs,
    int batch_size,
    int time_dim,
    int class_dim,
    const int* seq_lengths,
    int seq_lengths_size,
    const Alphabet &alphabet,
    size_t beam_size,
    size_t num_processes,
    double cutoff_prob,
    size_t cutoff_top_n,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results)
{
  VALID_CHECK_GT(num_processes, 0, "num_processes must be positive!");
  VALID_CHECK_EQ(batch_size, seq_lengths_size, "must have one sequence length per batch element");
  // thread pool
  ThreadPool pool(num_processes);

  // enqueue the tasks of decoding
  std::vector<std::future<std::vector<Output>>> res;
  for (int i = 0; i < batch_size; ++i) {
    res.emplace_back(pool.enqueue(ctc_beam_search_decoder,
                                  &probs[i*time_dim*class_dim],
                                  seq_lengths[i],
                                  class_dim,
                                  alphabet,
                                  beam_size,
                                  cutoff_prob,
                                  cutoff_top_n,
                                  ext_scorer,
                                  hot_words,
                                  num_results));
  }

  // get decoding results
  std::vector<std::vector<Output>> batch_results;
  for (int i = 0; i < batch_size; ++i) {
    batch_results.emplace_back(res[i].get());
  }
  return batch_results;
}
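A minimal single-utterance sketch of the public entry point above, using uniform dummy probabilities and no external scorer. The Alphabet argument is assumed to be loaded elsewhere; its construction is not shown here:

// Sketch: decode one utterance of dummy data with the default settings.
#include <vector>

std::vector<Output> DecodeSketch(const Alphabet &alphabet) {
  const int time_dim = 50;
  const int class_dim = alphabet.GetSize() + 1;  // labels + CTC blank
  // Uniform probabilities; blank prob < 0.999, so beam expansion starts
  // at the first timestep.
  std::vector<double> probs(time_dim * class_dim, 1.0 / class_dim);
  return ctc_beam_search_decoder(probs.data(), time_dim, class_dim, alphabet,
                                 /*beam_size=*/16, /*cutoff_prob=*/1.0,
                                 /*cutoff_top_n=*/40, /*ext_scorer=*/nullptr,
                                 /*hot_words=*/{}, /*num_results=*/1);
}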
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-python_35_tflite_8k-linux-amd64-prod-opt.yml
build: template_file: test-linux-opt-base.tyml dependencies: - "linux-amd64-tflite-opt" args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-python_tflite-tests-prod.sh 3.5.8:m 8k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 TFLite Python v3.5 prod tests (8kHz)" description: "Testing DeepSpeech for Linux/AMD64 on Python v3.5 on prod model, TFLite, optimized version (8kHz)"
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/script/encode.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/script/fst-class.h> #include <fst/encode.h> #include <fst/script/encode.h> #include <fst/script/script-impl.h> namespace fst { namespace script { void Encode(MutableFstClass *fst, uint32 flags, bool reuse_encoder, const string &coder_fname) { EncodeArgs1 args(fst, flags, reuse_encoder, coder_fname); Apply<Operation<EncodeArgs1>>("Encode", fst->ArcType(), &args); } void Encode(MutableFstClass *fst, EncodeMapperClass *encoder) { if (!internal::ArcTypesMatch(*fst, *encoder, "Encode")) { fst->SetProperties(kError, kError); return; } EncodeArgs2 args(fst, encoder); Apply<Operation<EncodeArgs2>>("Encode", fst->ArcType(), &args); } REGISTER_FST_OPERATION(Encode, StdArc, EncodeArgs1); REGISTER_FST_OPERATION(Encode, LogArc, EncodeArgs1); REGISTER_FST_OPERATION(Encode, Log64Arc, EncodeArgs1); REGISTER_FST_OPERATION(Encode, StdArc, EncodeArgs2); REGISTER_FST_OPERATION(Encode, LogArc, EncodeArgs2); REGISTER_FST_OPERATION(Encode, Log64Arc, EncodeArgs2); } // namespace script } // namespace fst
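For context, a hedged sketch of the arc-templated calls these script-level wrappers ultimately dispatch to (from fst/encode.h):

// Sketch: encode (ilabel, olabel) pairs into a single label space in place,
// then restore the original transducer labels with the same mapper.
#include <fst/encode.h>
#include <fst/vector-fst.h>

void EncodeDecodeSketch(fst::MutableFst<fst::StdArc> *f) {
  fst::EncodeMapper<fst::StdArc> mapper(fst::kEncodeLabels, fst::ENCODE);
  fst::Encode(f, &mapper);  // label pairs become single encoded labels
  fst::Decode(f, mapper);   // restores the original labels
}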
0
coqui_public_repos/STT/native_client/kenlm
coqui_public_repos/STT/native_client/kenlm/util/string_piece_hash.hh
#ifndef UTIL_STRING_PIECE_HASH_H #define UTIL_STRING_PIECE_HASH_H #include "have.hh" #include "string_piece.hh" #include <boost/functional/hash.hpp> #include <boost/version.hpp> #ifdef HAVE_ICU U_NAMESPACE_BEGIN #endif inline size_t hash_value(const StringPiece &str) { return boost::hash_range(str.data(), str.data() + str.length()); } #ifdef HAVE_ICU U_NAMESPACE_END #endif /* Support for lookup of StringPiece in boost::unordered_map<std::string> */ struct StringPieceCompatibleHash : public std::unary_function<const StringPiece &, size_t> { size_t operator()(const StringPiece &str) const { return hash_value(str); } }; struct StringPieceCompatibleEquals : public std::binary_function<const StringPiece &, const std::string &, bool> { bool operator()(const StringPiece &first, const StringPiece &second) const { return first == second; } }; template <class T> typename T::const_iterator FindStringPiece(const T &t, const StringPiece &key) { #if BOOST_VERSION < 104200 std::string temp(key.data(), key.size()); return t.find(temp); #else return t.find(key, StringPieceCompatibleHash(), StringPieceCompatibleEquals()); #endif } template <class T> typename T::iterator FindStringPiece(T &t, const StringPiece &key) { #if BOOST_VERSION < 104200 std::string temp(key.data(), key.size()); return t.find(temp); #else return t.find(key, StringPieceCompatibleHash(), StringPieceCompatibleEquals()); #endif } #endif // UTIL_STRING_PIECE_HASH_H
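A small usage sketch of the heterogeneous lookup helper above (assumes Boost >= 1.42 so the three-argument find() overload is taken and no temporary std::string is built):

// Sketch: look up a StringPiece key in a map keyed by std::string.
#include <boost/unordered_map.hpp>

int LookupSketch(const boost::unordered_map<std::string, int> &counts,
                 const StringPiece &key) {
  auto it = FindStringPiece(counts, key);
  return it == counts.end() ? 0 : it->second;
}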
0
coqui_public_repos/TTS/tests
coqui_public_repos/TTS/tests/tts_tests2/test_forward_tts.py
import torch as T from TTS.tts.models.forward_tts import ForwardTTS, ForwardTTSArgs from TTS.tts.utils.helpers import sequence_mask # pylint: disable=unused-variable def expand_encoder_outputs_test(): model = ForwardTTS(ForwardTTSArgs(num_chars=10)) inputs = T.rand(2, 5, 57) durations = T.randint(1, 4, (2, 57)) x_mask = T.ones(2, 1, 57) y_mask = T.ones(2, 1, durations.sum(1).max()) expanded, _ = model.expand_encoder_outputs(inputs, durations, x_mask, y_mask) for b in range(durations.shape[0]): index = 0 for idx, dur in enumerate(durations[b]): diff = ( expanded[b, :, index : index + dur.item()] - inputs[b, :, idx].repeat(dur.item()).view(expanded[b, :, index : index + dur.item()].shape) ).sum() assert abs(diff) < 1e-6, diff index += dur def model_input_output_test(): """Assert the output shapes of the model in different modes""" # VANILLA MODEL model = ForwardTTS(ForwardTTSArgs(num_chars=10, use_pitch=False, use_aligner=False)) x = T.randint(0, 10, (2, 21)) x_lengths = T.randint(10, 22, (2,)) x_lengths[-1] = 21 x_mask = sequence_mask(x_lengths).unsqueeze(1).long() durations = T.randint(1, 4, (2, 21)) durations = durations * x_mask.squeeze(1) y_lengths = durations.sum(1) y_mask = sequence_mask(y_lengths).unsqueeze(1).long() outputs = model.forward(x, x_lengths, y_lengths, dr=durations) assert outputs["model_outputs"].shape == (2, durations.sum(1).max(), 80) assert outputs["durations_log"].shape == (2, 21) assert outputs["durations"].shape == (2, 21) assert outputs["alignments"].shape == (2, durations.sum(1).max(), 21) assert (outputs["x_mask"] - x_mask).sum() == 0.0 assert (outputs["y_mask"] - y_mask).sum() == 0.0 assert outputs["alignment_soft"] is None assert outputs["alignment_mas"] is None assert outputs["alignment_logprob"] is None assert outputs["o_alignment_dur"] is None assert outputs["pitch_avg"] is None assert outputs["pitch_avg_gt"] is None # USE PITCH model = ForwardTTS(ForwardTTSArgs(num_chars=10, use_pitch=True, use_aligner=False)) x = T.randint(0, 10, (2, 21)) x_lengths = T.randint(10, 22, (2,)) x_lengths[-1] = 21 x_mask = sequence_mask(x_lengths).unsqueeze(1).long() durations = T.randint(1, 4, (2, 21)) durations = durations * x_mask.squeeze(1) y_lengths = durations.sum(1) y_mask = sequence_mask(y_lengths).unsqueeze(1).long() pitch = T.rand(2, 1, y_lengths.max()) outputs = model.forward(x, x_lengths, y_lengths, dr=durations, pitch=pitch) assert outputs["model_outputs"].shape == (2, durations.sum(1).max(), 80) assert outputs["durations_log"].shape == (2, 21) assert outputs["durations"].shape == (2, 21) assert outputs["alignments"].shape == (2, durations.sum(1).max(), 21) assert (outputs["x_mask"] - x_mask).sum() == 0.0 assert (outputs["y_mask"] - y_mask).sum() == 0.0 assert outputs["pitch_avg"].shape == (2, 1, 21) assert outputs["pitch_avg_gt"].shape == (2, 1, 21) assert outputs["alignment_soft"] is None assert outputs["alignment_mas"] is None assert outputs["alignment_logprob"] is None assert outputs["o_alignment_dur"] is None # USE ALIGNER NETWORK model = ForwardTTS(ForwardTTSArgs(num_chars=10, use_pitch=False, use_aligner=True)) x = T.randint(0, 10, (2, 21)) x_lengths = T.randint(10, 22, (2,)) x_lengths[-1] = 21 x_mask = sequence_mask(x_lengths).unsqueeze(1).long() durations = T.randint(1, 4, (2, 21)) durations = durations * x_mask.squeeze(1) y_lengths = durations.sum(1) y_mask = sequence_mask(y_lengths).unsqueeze(1).long() y = T.rand(2, y_lengths.max(), 80) outputs = model.forward(x, x_lengths, y_lengths, dr=durations, y=y) assert outputs["model_outputs"].shape == (2, 
durations.sum(1).max(), 80) assert outputs["durations_log"].shape == (2, 21) assert outputs["durations"].shape == (2, 21) assert outputs["alignments"].shape == (2, durations.sum(1).max(), 21) assert (outputs["x_mask"] - x_mask).sum() == 0.0 assert (outputs["y_mask"] - y_mask).sum() == 0.0 assert outputs["alignment_soft"].shape == (2, durations.sum(1).max(), 21) assert outputs["alignment_mas"].shape == (2, durations.sum(1).max(), 21) assert outputs["alignment_logprob"].shape == (2, 1, durations.sum(1).max(), 21) assert outputs["o_alignment_dur"].shape == (2, 21) assert outputs["pitch_avg"] is None assert outputs["pitch_avg_gt"] is None # USE ALIGNER NETWORK AND PITCH model = ForwardTTS(ForwardTTSArgs(num_chars=10, use_pitch=True, use_aligner=True)) x = T.randint(0, 10, (2, 21)) x_lengths = T.randint(10, 22, (2,)) x_lengths[-1] = 21 x_mask = sequence_mask(x_lengths).unsqueeze(1).long() durations = T.randint(1, 4, (2, 21)) durations = durations * x_mask.squeeze(1) y_lengths = durations.sum(1) y_mask = sequence_mask(y_lengths).unsqueeze(1).long() y = T.rand(2, y_lengths.max(), 80) pitch = T.rand(2, 1, y_lengths.max()) outputs = model.forward(x, x_lengths, y_lengths, dr=durations, pitch=pitch, y=y) assert outputs["model_outputs"].shape == (2, durations.sum(1).max(), 80) assert outputs["durations_log"].shape == (2, 21) assert outputs["durations"].shape == (2, 21) assert outputs["alignments"].shape == (2, durations.sum(1).max(), 21) assert (outputs["x_mask"] - x_mask).sum() == 0.0 assert (outputs["y_mask"] - y_mask).sum() == 0.0 assert outputs["alignment_soft"].shape == (2, durations.sum(1).max(), 21) assert outputs["alignment_mas"].shape == (2, durations.sum(1).max(), 21) assert outputs["alignment_logprob"].shape == (2, 1, durations.sum(1).max(), 21) assert outputs["o_alignment_dur"].shape == (2, 21) assert outputs["pitch_avg"].shape == (2, 1, 21) assert outputs["pitch_avg_gt"].shape == (2, 1, 21)
0
coqui_public_repos/inference-engine
coqui_public_repos/inference-engine/src/coqui-engine.h
#ifndef COQUI_ENGINE_H #define COQUI_ENGINE_H #ifdef __cplusplus extern "C" { #endif #ifndef SWIG #if defined _MSC_VER #define ENGINE_EXPORT __declspec(dllexport) #else #define ENGINE_EXPORT __attribute__ ((visibility("default"))) #endif /*End of _MSC_VER*/ #else #define ENGINE_EXPORT #endif typedef struct CoquiEngine CoquiEngine; typedef struct CoquiModelPackage CoquiModelPackage; typedef struct CoquiStreamingState CoquiStreamingState; /** * @brief Stores text of an individual token, along with its timing information */ typedef struct TokenMetadata { /** The text corresponding to this token */ const char* const text; /** Position of the token in units of 20ms */ const unsigned int timestep; /** Position of the token in seconds */ const float start_time; } TokenMetadata; /** * @brief A single transcript computed by the model, including a confidence * value and the metadata for its constituent tokens. */ typedef struct CandidateTranscript { /** Array of TokenMetadata objects */ const TokenMetadata* const tokens; /** Size of the tokens array */ const unsigned int num_tokens; /** Approximated confidence value for this transcript. This is roughly the * sum of the acoustic model logit values for each timestep/character that * contributed to the creation of this transcript. */ const double confidence; } CandidateTranscript; /** * @brief An array of CandidateTranscript objects computed by the model. */ typedef struct Metadata { /** Array of CandidateTranscript objects */ const CandidateTranscript* const transcripts; /** Size of the transcripts array */ const unsigned int num_transcripts; } Metadata; // sphinx-doc: error_code_listing_start #define COQUI_FOR_EACH_ERROR(APPLY) \ APPLY(COQUI_ERR_OK, 0x0000, "No error.") \ APPLY(COQUI_ERR_NO_MODEL, 0x1000, "Missing model information.") \ APPLY(COQUI_ERR_INVALID_ALPHABET, 0x2000, "Invalid alphabet embedded in model. 
(Data corruption?)") \ APPLY(COQUI_ERR_INVALID_SHAPE, 0x2001, "Invalid model shape.") \ APPLY(COQUI_ERR_INVALID_SCORER, 0x2002, "Invalid scorer file.") \ APPLY(COQUI_ERR_MODEL_INCOMPATIBLE, 0x2003, "Incompatible model.") \ APPLY(COQUI_ERR_SCORER_NOT_ENABLED, 0x2004, "External scorer is not enabled.") \ APPLY(COQUI_ERR_SCORER_UNREADABLE, 0x2005, "Could not read scorer file.") \ APPLY(COQUI_ERR_SCORER_INVALID_LM, 0x2006, "Could not recognize language model header in scorer.") \ APPLY(COQUI_ERR_SCORER_NO_TRIE, 0x2007, "Reached end of scorer file before loading vocabulary trie.") \ APPLY(COQUI_ERR_SCORER_INVALID_TRIE, 0x2008, "Invalid magic in trie header.") \ APPLY(COQUI_ERR_SCORER_VERSION_MISMATCH, 0x2009, "Scorer file version does not match expected version.") \ APPLY(COQUI_ERR_FAIL_INIT_MMAP, 0x3000, "Failed to initialize memory mapped model.") \ APPLY(COQUI_ERR_FAIL_INIT_SESS, 0x3001, "Failed to initialize the session.") \ APPLY(COQUI_ERR_FAIL_INTERPRETER, 0x3002, "Interpreter failed.") \ APPLY(COQUI_ERR_FAIL_RUN_SESS, 0x3003, "Failed to run the session.") \ APPLY(COQUI_ERR_FAIL_CREATE_STREAM, 0x3004, "Error creating the stream.") \ APPLY(COQUI_ERR_FAIL_READ_PROTOBUF, 0x3005, "Error reading the proto buffer model file.") \ APPLY(COQUI_ERR_FAIL_CREATE_SESS, 0x3006, "Failed to create session.") \ APPLY(COQUI_ERR_FAIL_CREATE_MODEL, 0x3007, "Could not allocate model state.") \ APPLY(COQUI_ERR_FAIL_INSERT_HOTWORD, 0x3008, "Could not insert hot-word.") \ APPLY(COQUI_ERR_FAIL_CLEAR_HOTWORD, 0x3009, "Could not clear hot-words.") \ APPLY(COQUI_ERR_FAIL_ERASE_HOTWORD, 0x3010, "Could not erase hot-word.") // sphinx-doc: error_code_listing_end enum Coqui_Error_Codes { #define DEFINE(NAME, VALUE, DESC) NAME = VALUE, COQUI_FOR_EACH_ERROR(DEFINE) #undef DEFINE }; /** * @brief Performs global initialization tasks before all other APIs can be used. * * @param[out] retval a CoquiEngine pointer. * * @return Zero on success, non-zero on failure. */ ENGINE_EXPORT int Coqui_InitEngine(CoquiEngine** retval); /** * @brief Frees CoquiEngine object created by {@link Coqui_InitEngine} * * @param engine a CoquiEngine pointer. * * @return Zero on success, non-zero on failure. */ ENGINE_EXPORT int Coqui_FreeEngine(CoquiEngine* engine); /** * @brief An object providing an interface to a trained Coqui model package. * * @param model_path The path to the frozen model graph. * @param[out] retval a CoquiModelPackage pointer * * @return Zero on success, non-zero on failure. */ ENGINE_EXPORT int Coqui_LoadModelPackage(CoquiEngine* engine, const char* model_path, CoquiModelPackage** retval); /** * @brief Get beam width value used by the model. If {@link Coqui_SetModelBeamWidth} * was not called before, will return the default value loaded from the * model file. * * @param ctx A CoquiModelPackage pointer created with {@link Coqui_CreateModel}. * * @return Beam width value used by the model. */ ENGINE_EXPORT unsigned int Coqui_GetModelBeamWidth(const CoquiModelPackage* ctx); /** * @brief Set beam width value used by the model. * * @param ctx A CoquiModelPackage pointer created with {@link Coqui_CreateModel}. * @param beam_width The beam width used by the model. A larger beam width value * generates better results at the cost of decoding time. * * @return Zero on success, non-zero on failure. */ ENGINE_EXPORT int Coqui_SetModelBeamWidth(CoquiModelPackage* ctx, unsigned int beam_width); /** * @brief Return the sample rate expected by a model. * * @param ctx A CoquiModelPackage pointer created with {@link Coqui_CreateModel}. 
* * @return Sample rate expected by the model for its input. */ ENGINE_EXPORT int Coqui_GetModelSampleRate(const CoquiModelPackage* ctx); /** * @brief Frees associated resources and destroys model object. */ ENGINE_EXPORT void Coqui_FreeModel(CoquiModelPackage* ctx); /** * @brief Enable decoding using an external scorer. * * @param ctx The CoquiModelPackage pointer for the model being changed. * @param scorer_path The path to the external scorer file. * * @return Zero on success, non-zero on failure (invalid arguments). */ ENGINE_EXPORT int Coqui_EnableExternalScorer(CoquiModelPackage* ctx, const char* scorer_path); /** * @brief Add a hot-word and its boost. * * Words that don't occur in the scorer (e.g. proper nouns) or strings that contain spaces won't be taken into account. * * @param ctx The CoquiModelPackage pointer for the model being changed. * @param word The hot-word. * @param boost The boost. Positive value increases and negative reduces chance of a word occuring in a transcription. Excessive positive boost might lead to splitting up of letters of the word following the hot-word. * * @return Zero on success, non-zero on failure (invalid arguments). */ ENGINE_EXPORT int Coqui_AddHotWord(CoquiModelPackage* ctx, const char* word, float boost); /** * @brief Remove entry for a hot-word from the hot-words map. * * @param ctx The CoquiModelPackage pointer for the model being changed. * @param word The hot-word. * * @return Zero on success, non-zero on failure (invalid arguments). */ ENGINE_EXPORT int Coqui_EraseHotWord(CoquiModelPackage* ctx, const char* word); /** * @brief Removes all elements from the hot-words map. * * @param ctx The CoquiModelPackage pointer for the model being changed. * * @return Zero on success, non-zero on failure (invalid arguments). */ ENGINE_EXPORT int Coqui_ClearHotWords(CoquiModelPackage* ctx); /** * @brief Disable decoding using an external scorer. * * @param ctx The CoquiModelPackage pointer for the model being changed. * * @return Zero on success, non-zero on failure. */ ENGINE_EXPORT int Coqui_DisableExternalScorer(CoquiModelPackage* ctx); /** * @brief Set hyperparameters alpha and beta of the external scorer. * * @param ctx The CoquiModelPackage pointer for the model being changed. * @param alpha The alpha hyperparameter of the decoder. Language model weight. * @param beta The beta hyperparameter of the decoder. Word insertion weight. * * @return Zero on success, non-zero on failure. */ ENGINE_EXPORT int Coqui_SetScorerAlphaBeta(CoquiModelPackage* ctx, float alpha, float beta); /** * @brief Use the Coqui model package to convert speech to text. * * @param ctx The CoquiModelPackage pointer for the model to use. * @param buffer A 16-bit, mono raw audio signal at the appropriate * sample rate (matching what the model was trained on). * @param buffer_size The number of samples in the audio signal. * * @return The STT result. The user is responsible for freeing the string using * {@link Coqui_FreeString()}. Returns NULL on error. */ ENGINE_EXPORT char* Coqui_SpeechToText(CoquiModelPackage* ctx, const short* buffer, unsigned int buffer_size); /** * @brief Use the Coqui model package to convert speech to text and output results * including metadata. * * @param ctx The CoquiModelPackage pointer for the model to use. * @param buffer A 16-bit, mono raw audio signal at the appropriate * sample rate (matching what the model was trained on). * @param buffer_size The number of samples in the audio signal. 
* @param num_results The maximum number of CandidateTranscript structs to return. Returned value might be smaller than this. * * @return Metadata struct containing multiple CandidateTranscript structs. Each * transcript has per-token metadata including timing information. The * user is responsible for freeing Metadata by calling {@link Coqui_FreeMetadata()}. * Returns NULL on error. */ ENGINE_EXPORT Metadata* Coqui_SpeechToTextWithMetadata(CoquiModelPackage* ctx, const short* buffer, unsigned int buffer_size, unsigned int num_results); /** * @brief Create a new streaming inference state. The streaming state returned * by this function can then be passed to {@link Coqui_FeedAudioContent()} * and {@link Coqui_FinishStream()}. * * @param ctx The CoquiModelPackage pointer for the model to use. * @param[out] retval an opaque pointer that represents the streaming state. Can * be NULL if an error occurs. * * @return Zero for success, non-zero on failure. */ ENGINE_EXPORT int Coqui_CreateStream(CoquiModelPackage* ctx, CoquiStreamingState** retval); /** * @brief Feed audio samples to an ongoing streaming inference. * * @param sctx A streaming state pointer returned by {@link Coqui_CreateStream()}. * @param buffer An array of 16-bit, mono raw audio samples at the * appropriate sample rate (matching what the model was trained on). * @param buffer_size The number of samples in @p buffer. */ ENGINE_EXPORT void Coqui_FeedAudioContent(CoquiStreamingState* sctx, const short* buffer, unsigned int buffer_size); /** * @brief Compute the intermediate decoding of an ongoing streaming inference. * * @param sctx A streaming state pointer returned by {@link Coqui_CreateStream()}. * * @return The STT intermediate result. The user is responsible for freeing the * string using {@link Coqui_FreeString()}. */ ENGINE_EXPORT char* Coqui_IntermediateDecode(const CoquiStreamingState* sctx); /** * @brief Compute the intermediate decoding of an ongoing streaming inference, * return results including metadata. * * @param sctx A streaming state pointer returned by {@link Coqui_CreateStream()}. * @param num_results The number of candidate transcripts to return. * * @return Metadata struct containing multiple candidate transcripts. Each transcript * has per-token metadata including timing information. The user is * responsible for freeing Metadata by calling {@link Coqui_FreeMetadata()}. * Returns NULL on error. */ ENGINE_EXPORT Metadata* Coqui_IntermediateDecodeWithMetadata(const CoquiStreamingState* sctx, unsigned int num_results); /** * @brief Compute the final decoding of an ongoing streaming inference and return * the result. Signals the end of an ongoing streaming inference. * * @param sctx A streaming state pointer returned by {@link Coqui_CreateStream()}. * * @return The STT result. The user is responsible for freeing the string using * {@link Coqui_FreeString()}. * * @note This method will free the state pointer (@p sctx). */ ENGINE_EXPORT char* Coqui_FinishStream(CoquiStreamingState* sctx); /** * @brief Compute the final decoding of an ongoing streaming inference and return * results including metadata. Signals the end of an ongoing streaming * inference. * * @param sctx A streaming state pointer returned by {@link Coqui_CreateStream()}. * @param num_results The number of candidate transcripts to return. * * @return Metadata struct containing multiple candidate transcripts. Each transcript * has per-token metadata including timing information. 
The user is * responsible for freeing Metadata by calling {@link Coqui_FreeMetadata()}. * Returns NULL on error. * * @note This method will free the state pointer (@p sctx). */ ENGINE_EXPORT Metadata* Coqui_FinishStreamWithMetadata(CoquiStreamingState* sctx, unsigned int num_results); /** * @brief Destroy a streaming state without decoding the computed logits. This * can be used if you no longer need the result of an ongoing streaming * inference and don't want to perform a costly decode operation. * * @param sctx A streaming state pointer returned by {@link Coqui_CreateStream()}. * * @note This method will free the state pointer (@p sctx). */ ENGINE_EXPORT void Coqui_FreeStream(CoquiStreamingState* sctx); /** * @brief Free memory allocated for metadata information. */ ENGINE_EXPORT void Coqui_FreeMetadata(Metadata* m); /** * @brief Free a char* string returned by the Coqui STT API. */ ENGINE_EXPORT void Coqui_FreeString(char* str); /** * @brief Returns the version of this library. The returned version is a semantic * version (SemVer 2.0.0). The string returned must be freed with {@link Coqui_FreeString()}. * * @return The version string. */ ENGINE_EXPORT char* Coqui_Version(); /** * @brief Returns a textual description corresponding to an error code. * The string returned must be freed with @{link Coqui_FreeString()}. * * @return The error description. */ ENGINE_EXPORT char* Coqui_ErrorCodeToErrorMessage(int aErrorCode); #undef ENGINE_EXPORT #ifdef __cplusplus } #endif #endif /* COQUI_ENGINE_H */
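A minimal end-to-end sketch of the batch API declared above. "model.tflite" is a hypothetical path; the audio buffer is assumed to be 16-bit mono at the model's sample rate, and error handling is reduced to the return-code checks the API defines:

/* Sketch: init engine, load a model package, transcribe, release. */
#include <stddef.h>

int TranscribeSketch(const short *audio, unsigned int num_samples) {
  CoquiEngine *engine = NULL;
  CoquiModelPackage *model = NULL;
  if (Coqui_InitEngine(&engine) != COQUI_ERR_OK) return 1;
  if (Coqui_LoadModelPackage(engine, "model.tflite", &model) != COQUI_ERR_OK) {
    Coqui_FreeEngine(engine);
    return 1;
  }
  char *text = Coqui_SpeechToText(model, audio, num_samples);
  if (text) {
    /* ... use the transcript ... */
    Coqui_FreeString(text);
  }
  Coqui_FreeModel(model);
  Coqui_FreeEngine(engine);
  return 0;
}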
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/script/convert.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_CONVERT_H_ #define FST_SCRIPT_CONVERT_H_ #include <memory> #include <string> #include <utility> #include <fst/register.h> #include <fst/script/arg-packs.h> #include <fst/script/fst-class.h> namespace fst { namespace script { using ConvertInnerArgs = std::pair<const FstClass &, const string &>; using ConvertArgs = WithReturnValue<FstClass *, ConvertInnerArgs>; template <class Arc> void Convert(ConvertArgs *args) { const Fst<Arc> &fst = *(std::get<0>(args->args).GetFst<Arc>()); const string &new_type = std::get<1>(args->args); std::unique_ptr<Fst<Arc>> result(Convert(fst, new_type)); args->retval = result ? new FstClass(*result) : nullptr; } FstClass *Convert(const FstClass &fst, const string &new_type); } // namespace script } // namespace fst #endif // FST_SCRIPT_CONVERT_H_
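A short sketch of driving this script-level conversion ("input.fst" and "output.fst" are hypothetical paths; "const" is the registered name of the ConstFst representation):

// Sketch: read an FST of any registered arc type, convert, and write it back.
#include <memory>

void ConvertSketch() {
  std::unique_ptr<fst::script::FstClass> f(
      fst::script::FstClass::Read("input.fst"));
  if (!f) return;
  std::unique_ptr<fst::script::FstClass> converted(
      fst::script::Convert(*f, "const"));  // returns nullptr on failure
  if (converted) converted->Write("output.fst");
}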
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/bin/fstrmepsilon.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/flags.h> #include <fst/fst.h> #include <fst/shortest-distance.h> #include <fst/weight.h> DEFINE_bool(connect, true, "Trim output"); DEFINE_double(delta, fst::kShortestDelta, "Comparison/quantization delta"); DEFINE_int64(nstate, fst::kNoStateId, "State number threshold"); DEFINE_string(queue_type, "auto", "Queue type: one of: \"auto\", " "\"fifo\", \"lifo\", \"shortest\", \"state\", \"top\""); DEFINE_string(weight, "", "Weight threshold"); int fstrmepsilon_main(int argc, char **argv); int main(int argc, char **argv) { return fstrmepsilon_main(argc, argv); }
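This binary only defines flags and forwards to fstrmepsilon_main; a hedged sketch of the underlying library call those flags configure:

// Sketch: RmEpsilon removes epsilon (label 0) transitions in place;
// connect=true trims unreachable states, mirroring the --connect flag above.
#include <fst/rmepsilon.h>
#include <fst/vector-fst.h>

void RmEpsilonSketch(fst::MutableFst<fst::StdArc> *f) {
  fst::RmEpsilon(f, /*connect=*/true);
}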
0
coqui_public_repos/inference-engine/third_party/kenlm/util
coqui_public_repos/inference-engine/third_party/kenlm/util/double-conversion/double-conversion.h
// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ #define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ #include "utils.h" namespace kenlm_double_conversion { class DoubleToStringConverter { public: // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the // function returns false. static const int kMaxFixedDigitsBeforePoint = 60; static const int kMaxFixedDigitsAfterPoint = 60; // When calling ToExponential with a requested_digits // parameter > kMaxExponentialDigits then the function returns false. static const int kMaxExponentialDigits = 120; // When calling ToPrecision with a requested_digits // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits // then the function returns false. static const int kMinPrecisionDigits = 1; static const int kMaxPrecisionDigits = 120; enum Flags { NO_FLAGS = 0, EMIT_POSITIVE_EXPONENT_SIGN = 1, EMIT_TRAILING_DECIMAL_POINT = 2, EMIT_TRAILING_ZERO_AFTER_POINT = 4, UNIQUE_ZERO = 8 }; // Flags should be a bit-or combination of the possible Flags-enum. // - NO_FLAGS: no special flags. // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent // form, emits a '+' for positive exponents. Example: 1.2e+2. // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is // converted into decimal format then a trailing decimal point is appended. // Example: 2345.0 is converted to "2345.". // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point // emits a trailing '0'-character. This flag requires the // EXMIT_TRAILING_DECIMAL_POINT flag. // Example: 2345.0 is converted to "2345.0". // - UNIQUE_ZERO: "-0.0" is converted to "0.0". // // Infinity symbol and nan_symbol provide the string representation for these // special values. If the string is NULL and the special value is encountered // then the conversion functions return false. // // The exponent_character is used in exponential representations. 
It is // usually 'e' or 'E'. // // When converting to the shortest representation the converter will // represent input numbers in decimal format if they are in the interval // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[ // (lower boundary included, greater boundary excluded). // Example: with decimal_in_shortest_low = -6 and // decimal_in_shortest_high = 21: // ToShortest(0.000001) -> "0.000001" // ToShortest(0.0000001) -> "1e-7" // ToShortest(111111111111111111111.0) -> "111111111111111110000" // ToShortest(100000000000000000000.0) -> "100000000000000000000" // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" // // When converting to precision mode the converter may add // max_leading_padding_zeroes before returning the number in exponential // format. // Example with max_leading_padding_zeroes_in_precision_mode = 6. // ToPrecision(0.0000012345, 2) -> "0.0000012" // ToPrecision(0.00000012345, 2) -> "1.2e-7" // Similarily the converter may add up to // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid // returning an exponential representation. A zero added by the // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: // ToPrecision(230.0, 2) -> "230" // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. DoubleToStringConverter(int flags, const char* infinity_symbol, const char* nan_symbol, char exponent_character, int decimal_in_shortest_low, int decimal_in_shortest_high, int max_leading_padding_zeroes_in_precision_mode, int max_trailing_padding_zeroes_in_precision_mode) : flags_(flags), infinity_symbol_(infinity_symbol), nan_symbol_(nan_symbol), exponent_character_(exponent_character), decimal_in_shortest_low_(decimal_in_shortest_low), decimal_in_shortest_high_(decimal_in_shortest_high), max_leading_padding_zeroes_in_precision_mode_( max_leading_padding_zeroes_in_precision_mode), max_trailing_padding_zeroes_in_precision_mode_( max_trailing_padding_zeroes_in_precision_mode) { // When 'trailing zero after the point' is set, then 'trailing point' // must be set too. ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) || !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0)); } // Returns a converter following the EcmaScript specification. static const DoubleToStringConverter& EcmaScriptConverter(); // Computes the shortest string of digits that correctly represent the input // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high // (see constructor) it then either returns a decimal representation, or an // exponential representation. // Example with decimal_in_shortest_low = -6, // decimal_in_shortest_high = 21, // EMIT_POSITIVE_EXPONENT_SIGN activated, and // EMIT_TRAILING_DECIMAL_POINT deactived: // ToShortest(0.000001) -> "0.000001" // ToShortest(0.0000001) -> "1e-7" // ToShortest(111111111111111111111.0) -> "111111111111111110000" // ToShortest(100000000000000000000.0) -> "100000000000000000000" // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" // // Note: the conversion may round the output if the returned string // is accurate enough to uniquely identify the input-number. // For example the most precise representation of the double 9e59 equals // "899999999999999918767229449717619953810131273674690656206848", but // the converter will return the shorter (but still correct) "9e59". // // Returns true if the conversion succeeds. 
The conversion always succeeds // except when the input value is special and no infinity_symbol or // nan_symbol has been given to the constructor. bool ToShortest(double value, StringBuilder* result_builder) const { return ToShortestIeeeNumber(value, result_builder, SHORTEST); } // Same as ToShortest, but for single-precision floats. bool ToShortestSingle(float value, StringBuilder* result_builder) const { return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE); } // Computes a decimal representation with a fixed number of digits after the // decimal point. The last emitted digit is rounded. // // Examples: // ToFixed(3.12, 1) -> "3.1" // ToFixed(3.1415, 3) -> "3.142" // ToFixed(1234.56789, 4) -> "1234.5679" // ToFixed(1.23, 5) -> "1.23000" // ToFixed(0.1, 4) -> "0.1000" // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00" // ToFixed(0.1, 30) -> "0.100000000000000005551115123126" // ToFixed(0.1, 17) -> "0.10000000000000001" // // If requested_digits equals 0, then the tail of the result depends on // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT. // Examples, for requested_digits == 0, // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be // - false and false: then 123.45 -> 123 // 0.678 -> 1 // - true and false: then 123.45 -> 123. // 0.678 -> 1. // - true and true: then 123.45 -> 123.0 // 0.678 -> 1.0 // // Returns true if the conversion succeeds. The conversion always succeeds // except for the following cases: // - the input value is special and no infinity_symbol or nan_symbol has // been provided to the constructor, // - 'value' > 10^kMaxFixedDigitsBeforePoint, or // - 'requested_digits' > kMaxFixedDigitsAfterPoint. // The last two conditions imply that the result will never contain more than // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters // (one additional character for the sign, and one for the decimal point). bool ToFixed(double value, int requested_digits, StringBuilder* result_builder) const; // Computes a representation in exponential format with requested_digits // after the decimal point. The last emitted digit is rounded. // If requested_digits equals -1, then the shortest exponential representation // is computed. // // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and // exponent_character set to 'e'. // ToExponential(3.12, 1) -> "3.1e0" // ToExponential(5.0, 3) -> "5.000e0" // ToExponential(0.001, 2) -> "1.00e-3" // ToExponential(3.1415, -1) -> "3.1415e0" // ToExponential(3.1415, 4) -> "3.1415e0" // ToExponential(3.1415, 3) -> "3.142e0" // ToExponential(123456789000000, 3) -> "1.235e14" // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30" // ToExponential(1000000000000000019884624838656.0, 32) -> // "1.00000000000000001988462483865600e30" // ToExponential(1234, 0) -> "1e3" // // Returns true if the conversion succeeds. The conversion always succeeds // except for the following cases: // - the input value is special and no infinity_symbol or nan_symbol has // been provided to the constructor, // - 'requested_digits' > kMaxExponentialDigits. // The last condition implies that the result will never contain more than // kMaxExponentialDigits + 8 characters (the sign, the digit before the // decimal point, the decimal point, the exponent character, the // exponent's sign, and at most 3 exponent digits). 
bool ToExponential(double value, int requested_digits, StringBuilder* result_builder) const; // Computes 'precision' leading digits of the given 'value' and returns them // either in exponential or decimal format, depending on // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the // constructor). // The last computed digit is rounded. // // Example with max_leading_padding_zeroes_in_precision_mode = 6. // ToPrecision(0.0000012345, 2) -> "0.0000012" // ToPrecision(0.00000012345, 2) -> "1.2e-7" // Similarly the converter may add up to // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid // returning an exponential representation. A zero added by the // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: // ToPrecision(230.0, 2) -> "230" // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no // EMIT_TRAILING_ZERO_AFTER_POINT: // ToPrecision(123450.0, 6) -> "123450" // ToPrecision(123450.0, 5) -> "123450" // ToPrecision(123450.0, 4) -> "123500" // ToPrecision(123450.0, 3) -> "123000" // ToPrecision(123450.0, 2) -> "1.2e5" // // Returns true if the conversion succeeds. The conversion always succeeds // except for the following cases: // - the input value is special and no infinity_symbol or nan_symbol has // been provided to the constructor, // - precision < kMinPrecisionDigits // - precision > kMaxPrecisionDigits // The last condition implies that the result will never contain more than // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the // exponent character, the exponent's sign, and at most 3 exponent digits). bool ToPrecision(double value, int precision, StringBuilder* result_builder) const; enum DtoaMode { // Produce the shortest correct representation. // For example the output of 0.299999999999999988897 is (the less accurate // but correct) 0.3. SHORTEST, // Same as SHORTEST, but for single-precision floats. SHORTEST_SINGLE, // Produce a fixed number of digits after the decimal point. // For instance fixed(0.1, 4) becomes 0.1000 // If the input number is big, the output will be big. FIXED, // Fixed number of digits (independent of the decimal point). PRECISION }; // The maximal number of digits that are needed to emit a double in base 10. // A higher precision can be achieved by using more digits, but the shortest // accurate representation of any double will never use more digits than // kBase10MaximalLength. // Note that DoubleToAscii null-terminates its input. So the given buffer // should be at least kBase10MaximalLength + 1 characters long. static const int kBase10MaximalLength = 17; // Converts the given double 'v' to ascii. 'v' must not be NaN, +Infinity, or // -Infinity. In SHORTEST_SINGLE-mode this restriction also applies to 'v' // after it has been cast to a single-precision float. That is, in this // mode static_cast<float>(v) must not be NaN, +Infinity or -Infinity. // // The result should be interpreted as buffer * 10^(point-length). // // The output depends on the given mode: // - SHORTEST: produce the least amount of digits for which the internal // identity requirement is still satisfied. If the digits are printed // (together with the correct exponent) then reading this number will give // 'v' again. The buffer will choose the representation that is closest to // 'v'. 
If there are two at the same distance, then the one farther away // from 0 is chosen (halfway cases - ending with 5 - are rounded up). // In this mode the 'requested_digits' parameter is ignored. // - SHORTEST_SINGLE: same as SHORTEST but with single-precision. // - FIXED: produces digits necessary to print a given number with // 'requested_digits' digits after the decimal point. The produced digits // might be too short in which case the caller has to fill the remainder // with '0's. // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. // Halfway cases are rounded towards +/-Infinity (away from 0). The call // toFixed(0.15, 2) thus returns buffer="2", point=0. // The returned buffer may contain digits that would be truncated from the // shortest representation of the input. // - PRECISION: produces 'requested_digits' where the first digit is not '0'. // Even though the length of produced digits usually equals // 'requested_digits', the function is allowed to return fewer digits, in // which case the caller has to fill the missing digits with '0's. // Halfway cases are again rounded away from 0. // DoubleToAscii expects the given buffer to be big enough to hold all // digits and a terminating null-character. In SHORTEST-mode it expects a // buffer of at least kBase10MaximalLength + 1. In all other modes the // requested_digits parameter and the padding-zeroes limit the size of the // output. Don't forget the decimal point, the exponent character and the // terminating null-character when computing the maximal output size. // The given length is only used in debug mode to ensure the buffer is big // enough. static void DoubleToAscii(double v, DtoaMode mode, int requested_digits, char* buffer, int buffer_length, bool* sign, int* length, int* point); private: // Implementation for ToShortest and ToShortestSingle. bool ToShortestIeeeNumber(double value, StringBuilder* result_builder, DtoaMode mode) const; // If the value is a special value (NaN or Infinity) constructs the // corresponding string using the configured infinity/nan-symbol. // If either of them is NULL or the value is not special then the // function returns false. bool HandleSpecialValues(double value, StringBuilder* result_builder) const; // Constructs an exponential representation (i.e. 1.234e56). // The given exponent assumes a decimal point after the first decimal digit. void CreateExponentialRepresentation(const char* decimal_digits, int length, int exponent, StringBuilder* result_builder) const; // Creates a decimal representation (i.e. 1234.5678). void CreateDecimalRepresentation(const char* decimal_digits, int length, int decimal_point, int digits_after_point, StringBuilder* result_builder) const; const int flags_; const char* const infinity_symbol_; const char* const nan_symbol_; const char exponent_character_; const int decimal_in_shortest_low_; const int decimal_in_shortest_high_; const int max_leading_padding_zeroes_in_precision_mode_; const int max_trailing_padding_zeroes_in_precision_mode_; DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter); }; class StringToDoubleConverter { public: // Enumeration for allowing octals and ignoring junk when converting // strings to numbers. enum Flags { NO_FLAGS = 0, ALLOW_HEX = 1, ALLOW_OCTALS = 2, ALLOW_TRAILING_JUNK = 4, ALLOW_LEADING_SPACES = 8, ALLOW_TRAILING_SPACES = 16, ALLOW_SPACES_AFTER_SIGN = 32 }; // Flags should be a bit-or combination of the possible Flags-enum. // - NO_FLAGS: no special flags. // - ALLOW_HEX: recognizes the prefix "0x". 
Hex numbers may only be integers. // Ex: StringToDouble("0x1234") -> 4660.0 // In StringToDouble("0x1234.56") the characters ".56" are trailing // junk. The result of the call is hence dependent on // the ALLOW_TRAILING_JUNK flag and/or the junk value. // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK, // the string will not be parsed as "0" followed by junk. // // - ALLOW_OCTALS: recognizes the prefix "0" for octals: // If a sequence of octal digits starts with '0', then the number is // read as octal integer. Octal numbers may only be integers. // Ex: StringToDouble("01234") -> 668.0 // StringToDouble("012349") -> 12349.0 // Not a sequence of octal // // digits. // In StringToDouble("01234.56") the characters ".56" are trailing // junk. The result of the call is hence dependent on // the ALLOW_TRAILING_JUNK flag and/or the junk value. // In StringToDouble("01234e56") the characters "e56" are trailing // junk, too. // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of // a double literal. // - ALLOW_LEADING_SPACES: skip over leading whitespace, including spaces, // new-lines, and tabs. // - ALLOW_TRAILING_SPACES: ignore trailing whitespace. // - ALLOW_SPACES_AFTER_SIGN: ignore whitespace after the sign. // Ex: StringToDouble("- 123.2") -> -123.2. // StringToDouble("+ 123.2") -> 123.2 // // empty_string_value is returned when an empty string is given as input. // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string // containing only spaces is converted to the 'empty_string_value', too. // // junk_string_value is returned when // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not // part of a double-literal) is found. // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a // double literal. // // infinity_symbol and nan_symbol are strings that are used to detect // inputs that represent infinity and NaN. They can be null, in which case // they are ignored. // The conversion routine first reads any possible signs. Then it compares the // following character of the input-string with the first character of // the infinity, and nan-symbol. If either matches, the function assumes, that // a match has been found, and expects the following input characters to match // the remaining characters of the special-value symbol. // This means that the following restrictions apply to special-value symbols: // - they must not start with signs ('+', or '-'), // - they must not have the same first character. // - they must not start with digits. // // Examples: // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK, // empty_string_value = 0.0, // junk_string_value = NaN, // infinity_symbol = "infinity", // nan_symbol = "nan": // StringToDouble("0x1234") -> 4660.0. // StringToDouble("0x1234K") -> 4660.0. // StringToDouble("") -> 0.0 // empty_string_value. // StringToDouble(" ") -> NaN // junk_string_value. // StringToDouble(" 1") -> NaN // junk_string_value. // StringToDouble("0x") -> NaN // junk_string_value. // StringToDouble("-123.45") -> -123.45. // StringToDouble("--123.45") -> NaN // junk_string_value. // StringToDouble("123e45") -> 123e45. // StringToDouble("123E45") -> 123e45. // StringToDouble("123e+45") -> 123e45. // StringToDouble("123E-45") -> 123e-45. // StringToDouble("123e") -> 123.0 // trailing junk ignored. // StringToDouble("123e-") -> 123.0 // trailing junk ignored. // StringToDouble("+NaN") -> NaN // NaN string literal. // StringToDouble("-infinity") -> -inf. // infinity literal. 
// StringToDouble("Infinity") -> NaN // junk_string_value. // // flags = ALLOW_OCTAL | ALLOW_LEADING_SPACES, // empty_string_value = 0.0, // junk_string_value = NaN, // infinity_symbol = NULL, // nan_symbol = NULL: // StringToDouble("0x1234") -> NaN // junk_string_value. // StringToDouble("01234") -> 668.0. // StringToDouble("") -> 0.0 // empty_string_value. // StringToDouble(" ") -> 0.0 // empty_string_value. // StringToDouble(" 1") -> 1.0 // StringToDouble("0x") -> NaN // junk_string_value. // StringToDouble("0123e45") -> NaN // junk_string_value. // StringToDouble("01239E45") -> 1239e45. // StringToDouble("-infinity") -> NaN // junk_string_value. // StringToDouble("NaN") -> NaN // junk_string_value. StringToDoubleConverter(int flags, double empty_string_value, double junk_string_value, const char* infinity_symbol, const char* nan_symbol) : flags_(flags), empty_string_value_(empty_string_value), junk_string_value_(junk_string_value), infinity_symbol_(infinity_symbol), nan_symbol_(nan_symbol) { } // Performs the conversion. // The output parameter 'processed_characters_count' is set to the number // of characters that have been processed to read the number. // Spaces than are processed with ALLOW_{LEADING|TRAILING}_SPACES are included // in the 'processed_characters_count'. Trailing junk is never included. double StringToDouble(const char* buffer, int length, int* processed_characters_count) const; // Same as StringToDouble above but for 16 bit characters. double StringToDouble(const uc16* buffer, int length, int* processed_characters_count) const; // Same as StringToDouble but reads a float. // Note that this is not equivalent to static_cast<float>(StringToDouble(...)) // due to potential double-rounding. float StringToFloat(const char* buffer, int length, int* processed_characters_count) const; // Same as StringToFloat above but for 16 bit characters. float StringToFloat(const uc16* buffer, int length, int* processed_characters_count) const; private: const int flags_; const double empty_string_value_; const double junk_string_value_; const char* const infinity_symbol_; const char* const nan_symbol_; template <class Iterator> double StringToIeee(Iterator start_pointer, int length, bool read_as_double, int* processed_characters_count) const; DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter); }; } // namespace kenlm_double_conversion #endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/bin/fstminimize-main.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Minimizes a deterministic FST. #include <cstring> #include <memory> #include <string> #include <fst/flags.h> #include <fst/log.h> #include <fst/script/minimize.h> DECLARE_double(delta); DECLARE_bool(allow_nondet); int fstminimize_main(int argc, char **argv) { namespace s = fst::script; using fst::script::MutableFstClass; using fst::script::VectorFstClass; string usage = "Minimizes a deterministic FST.\n\n Usage: "; usage += argv[0]; usage += " [in.fst [out1.fst [out2.fst]]]\n"; std::set_new_handler(FailedNewHandler); SET_FLAGS(usage.c_str(), &argc, &argv, true); if (argc > 4) { ShowUsage(); return 1; } const string in_name = (argc > 1 && strcmp(argv[1], "-") != 0) ? argv[1] : ""; const string out1_name = (argc > 2 && strcmp(argv[2], "-") != 0) ? argv[2] : ""; const string out2_name = (argc > 3 && strcmp(argv[3], "-") != 0) ? argv[3] : ""; if (out1_name.empty() && out2_name.empty() && argc > 3) { LOG(ERROR) << argv[0] << ": Both outputs can't be standard output."; return 1; } std::unique_ptr<MutableFstClass> fst1(MutableFstClass::Read(in_name, true)); if (!fst1) return 1; if (argc > 3) { std::unique_ptr<MutableFstClass> fst2(new VectorFstClass(fst1->ArcType())); s::Minimize(fst1.get(), fst2.get(), FLAGS_delta, FLAGS_allow_nondet); if (!fst2->Write(out2_name)) return 1; } else { s::Minimize(fst1.get(), nullptr, FLAGS_delta, FLAGS_allow_nondet); } return !fst1->Write(out1_name); }
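For reference, the same operation is available through the arc-level library API rather than the `fst::script` layer the binary uses. A sketch with placeholder file names; `Minimize()` is declared in `<fst/minimize.h>`, pulled in here via `fstlib.h`.

```cpp
#include <memory>

#include <fst/fstlib.h>

int main() {
  std::unique_ptr<fst::StdVectorFst> fst(
      fst::StdVectorFst::Read("in.fst"));  // placeholder path
  if (!fst) return 1;
  // In-place minimization of a deterministic FST; a second FST argument
  // would collect the residual when minimizing a transducer.
  fst::Minimize(fst.get());
  return !fst->Write("out.fst");  // placeholder path
}
```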
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/test-cpp_16k-linux-amd64-opt.yml
build: template_file: test-linux-opt-base.tyml dependencies: - "linux-amd64-cpu-opt" - "test-training_16k-linux-amd64-py36m-opt" test_model_task: "test-training_16k-linux-amd64-py36m-opt" args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-cpp-ds-tests.sh 16k" workerType: "${docker.dsTests}" metadata: name: "DeepSpeech Linux AMD64 CPU C++ tests (16kHz)" description: "Testing DeepSpeech C++ for Linux/AMD64, CPU only, optimized version (16kHz)"
0
coqui_public_repos/STT/native_client/kenlm/lm
coqui_public_repos/STT/native_client/kenlm/lm/interpolate/normalize_test.cc
#include "normalize.hh" #include "interpolate_info.hh" #include "merge_probabilities.hh" #include "../common/ngram_stream.hh" #include "../../util/stream/chain.hh" #include "../../util/stream/multi_stream.hh" #define BOOST_TEST_MODULE NormalizeTest #include <boost/test/unit_test.hpp> namespace lm { namespace interpolate { namespace { // log without backoff const float kInputs[] = {-0.3, 1.2, -9.8, 4.0, -7.0, 0.0}; class WriteInput { public: WriteInput() {} void Run(const util::stream::ChainPosition &to) { util::stream::Stream out(to); for (WordIndex i = 0; i < sizeof(kInputs) / sizeof(float); ++i, ++out) { memcpy(out.Get(), &i, sizeof(WordIndex)); memcpy((uint8_t*)out.Get() + sizeof(WordIndex), &kInputs[i], sizeof(float)); } out.Poison(); } }; void CheckOutput(const util::stream::ChainPosition &from) { NGramStream<float> in(from); float sum = 0.0; for (WordIndex i = 0; i < sizeof(kInputs) / sizeof(float) - 1 /* <s> at the end */; ++i) { sum += pow(10.0, kInputs[i]); } sum = log10(sum); BOOST_REQUIRE(in); BOOST_CHECK_CLOSE(kInputs[0] - sum, in->Value(), 0.0001); BOOST_REQUIRE(++in); BOOST_CHECK_CLOSE(kInputs[1] - sum, in->Value(), 0.0001); BOOST_REQUIRE(++in); BOOST_CHECK_CLOSE(kInputs[2] - sum, in->Value(), 0.0001); BOOST_REQUIRE(++in); BOOST_CHECK_CLOSE(kInputs[3] - sum, in->Value(), 0.0001); BOOST_REQUIRE(++in); BOOST_CHECK_CLOSE(kInputs[4] - sum, in->Value(), 0.0001); BOOST_REQUIRE(++in); BOOST_CHECK_CLOSE(kInputs[5] - sum, in->Value(), 0.0001); BOOST_CHECK(!++in); } BOOST_AUTO_TEST_CASE(Unigrams) { InterpolateInfo info; info.lambdas.push_back(2.0); info.lambdas.push_back(-0.1); info.orders.push_back(1); info.orders.push_back(1); BOOST_CHECK_EQUAL(0, MakeEncoder(info, 1).EncodedLength()); // No backoffs. util::stream::Chains blank(0); util::FixedArray<util::stream::ChainPositions> models_by_order(2); models_by_order.push_back(blank); models_by_order.push_back(blank); util::stream::Chains merged_probabilities(1); util::stream::Chains probabilities_out(1); util::stream::Chains backoffs_out(0); merged_probabilities.push_back(util::stream::ChainConfig(sizeof(WordIndex) + sizeof(float) + sizeof(float), 2, 24)); probabilities_out.push_back(util::stream::ChainConfig(sizeof(WordIndex) + sizeof(float), 2, 100)); merged_probabilities[0] >> WriteInput(); Normalize(info, models_by_order, merged_probabilities, probabilities_out, backoffs_out); util::stream::ChainPosition checker(probabilities_out[0].Add()); merged_probabilities >> util::stream::kRecycle; probabilities_out >> util::stream::kRecycle; CheckOutput(checker); probabilities_out.Wait(); } }}} // namespaces
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/log.h
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Google-style logging declarations and inline definitions. #ifndef FST_LIB_LOG_H_ #define FST_LIB_LOG_H_ #include <cassert> #include <iostream> #include <string> #include <fst/types.h> #include <fst/flags.h> using std::string; DECLARE_int32_t(v); class LogMessage { public: LogMessage(const string &type) : fatal_(type == "FATAL") { std::cerr << type << ": "; } ~LogMessage() { std::cerr << std::endl; if(fatal_) exit(1); } std::ostream &stream() { return std::cerr; } private: bool fatal_; }; #define LOG(type) LogMessage(#type).stream() #define VLOG(level) if ((level) <= FLAGS_v) LOG(INFO) // Checks inline void FstCheck(bool x, const char* expr, const char *file, int line) { if (!x) { LOG(FATAL) << "Check failed: \"" << expr << "\" file: " << file << " line: " << line; } } #define CHECK(x) FstCheck(static_cast<bool>(x), #x, __FILE__, __LINE__) #define CHECK_EQ(x, y) CHECK((x) == (y)) #define CHECK_LT(x, y) CHECK((x) < (y)) #define CHECK_GT(x, y) CHECK((x) > (y)) #define CHECK_LE(x, y) CHECK((x) <= (y)) #define CHECK_GE(x, y) CHECK((x) >= (y)) #define CHECK_NE(x, y) CHECK((x) != (y)) // Debug checks #define DCHECK(x) assert(x) #define DCHECK_EQ(x, y) DCHECK((x) == (y)) #define DCHECK_LT(x, y) DCHECK((x) < (y)) #define DCHECK_GT(x, y) DCHECK((x) > (y)) #define DCHECK_LE(x, y) DCHECK((x) <= (y)) #define DCHECK_GE(x, y) DCHECK((x) >= (y)) #define DCHECK_NE(x, y) DCHECK((x) != (y)) // Ports #define ATTRIBUTE_DEPRECATED __attribute__((deprecated)) #endif // FST_LIB_LOG_H_
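A small sketch of how the macros above are used in practice (the function is illustrative; `FLAGS_v` is set via the `--v` flag declared above).

```cpp
#include <cstdlib>

#include <fst/log.h>

int ParsePort(const char *s) {
  CHECK(s != nullptr);       // on failure: LOG(FATAL) message, then exit(1)
  const int port = std::atoi(s);
  CHECK_GT(port, 0);
  CHECK_LE(port, 65535);
  VLOG(1) << "parsed port " << port;  // emitted only when FLAGS_v >= 1
  return port;
}
```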
0
coqui_public_repos/STT/native_client/java
coqui_public_repos/STT/native_client/java/libstt/gradle.properties
ABI_FILTERS = arm64-v8a;armeabi-v7a;x86_64
0
coqui_public_repos/STT-models/tatar/itml
coqui_public_repos/STT-models/tatar/itml/v0.1.0/MODEL_CARD.md
# Model card for Tatar STT Jump to section: - [Model details](#model-details) - [Intended use](#intended-use) - [Performance Factors](#performance-factors) - [Metrics](#metrics) - [Training data](#training-data) - [Evaluation data](#evaluation-data) - [Ethical considerations](#ethical-considerations) - [Caveats and recommendations](#caveats-and-recommendations) ## Model details - Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group. - Model language: Tatar / Татарча / `tt` - Model date: April 26, 2021 - Model type: `Speech-to-Text` - Model version: `v0.1.0` - Compatible with 🐸 STT version: `v0.9.3` - License: AGPL - Citation details: `@techreport{tatar-stt, author = {Tyers,Francis}, title = {Tatar STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-TT-0.1} }` - Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/). ## Intended use Speech-to-Text for the [Tatar Language](https://en.wikipedia.org/wiki/Tatar_language) on 16kHz, mono-channel audio. ## Performance Factors Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). ## Metrics STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk. #### Transcription Accuracy The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/tt/). |Test Corpus|WER|CER| |-----------|---|---| |Common Voice|85.8\%|31.7\%| #### Real-Time Factor Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF. Recorded average RTF on laptop CPU: `` #### Model Size `model.pbmm`: 181M `model.tflite`: 46M ### Approaches to uncertainty and variability Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio. ## Training data This model was trained on Common Voice 6.1 train. ## Evaluation data The model was evaluated on Common Voice 6.1 test. ## Ethical considerations Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use. ### Demographic Bias You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue. ### Surveillance Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries. 
You should not assume consent to record and analyze private speech. ## Caveats and recommendations Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data). In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/compose-filter.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Classes for filtering the composition matches, e.g. for correct epsilon // handling. #ifndef FST_COMPOSE_FILTER_H_ #define FST_COMPOSE_FILTER_H_ #include <fst/filter-state.h> #include <fst/fst-decl.h> // For optional argument declarations #include <fst/fst.h> #include <fst/matcher.h> namespace fst { // Composition filters determine which matches are allowed to proceed. The // filter's state is represented by the type ComposeFilter::FilterState. // The basic filters handle correct epsilon matching. Their interface is: // // template <class M1, class M2> // class ComposeFilter { // public: // using Matcher1 = ...; // using Matcher2 = ...; // using FST1 = typename M1::FST; // using FST2 = typename M2::FST; // using FilterState = ...; // // using Arc = typename FST1::Arc; // using StateId = typename Arc::StateId; // using Weight = typename Arc::Weight; // // // Required constructor. // ComposeFilter(const FST1 &fst1, const FST2 &fst2, // M1 *matcher1 = nullptr, M2 *matcher2 = nullptr); // // // If safe=true, the copy is thread-safe. See Fst<>::Copy() // // for further doc. // ComposeFilter(const ComposeFilter<M1, M2> &filter, // bool safe = false); // // // Return start state of filter. // FilterState Start() const; // // // Specifies current composition state. // void SetState(StateId s1, StateId s2, const FilterState &fs); // // // Apply filter at current composition state to these transitions. If an // // arc label to be matched is kNolabel, then that side does not consume a // // symbol. Returns the new filter state or, if disallowed, // // FilterState::NoState(). The filter is permitted to modify its inputs // // (e.g. for optimization reasons). // FilterState FilterArc(Arc *arc1, Arc *arc2) const; // // Apply filter at current composition state to these final weights // // (cf. superfinal transitions). The filter may modify its inputs // // (e.g. for optimization reasons). // void FilterFinal(Weight *w1, Weight *w2) const; // // // Return the respective matchers. Ownership stays with filter. These // // methods allow the filter to access and possibly modify the composition // // matchers (useful, e.g., with lookahead). // // Matcher1 *GetMatcher1(); // // Matcher2 *GetMatcher2(); // // // This specifies how the filter affects the composition result properties. // It takes as argument the properties that would apply with a trivial // // composition filter. // uint64 Properties(uint64 props) const; // }; // // This filter allows only exact matching of symbols from FST1 with symbols on // FST2; e.g., no special interpretation of epsilons. template <class M1, class M2 /* = M1 */> class NullComposeFilter { public: using Matcher1 = M1; using Matcher2 = M2; using FST1 = typename M1::FST; using FST2 = typename M2::FST; using FilterState = TrivialFilterState; using Arc = typename FST1::Arc; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; NullComposeFilter(const FST1 &fst1, const FST2 &fst2, Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr) : matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)), matcher2_(matcher2 ? 
matcher2 : new Matcher2(fst2, MATCH_INPUT)), fst1_(matcher1_->GetFst()), fst2_(matcher2_->GetFst()) {} NullComposeFilter(const NullComposeFilter<M1, M2> &filter, bool safe = false) : matcher1_(filter.matcher1_->Copy(safe)), matcher2_(filter.matcher2_->Copy(safe)), fst1_(matcher1_->GetFst()), fst2_(matcher2_->GetFst()) {} FilterState Start() const { return FilterState(true); } void SetState(StateId, StateId, const FilterState &) {} FilterState FilterArc(Arc *arc1, Arc *arc2) const { return (arc1->olabel == kNoLabel || arc2->ilabel == kNoLabel) ? FilterState::NoState() : FilterState(true); } void FilterFinal(Weight *, Weight *) const {} Matcher1 *GetMatcher1() { return matcher1_.get(); } Matcher2 *GetMatcher2() { return matcher2_.get(); } uint64 Properties(uint64 props) const { return props; } private: std::unique_ptr<Matcher1> matcher1_; std::unique_ptr<Matcher2> matcher2_; const FST1 &fst1_; const FST2 &fst2_; }; // This filter allows all epsilon matches, potentially resulting in redundant // epsilon paths. The use of this filter gives correct results iff one of the // following conditions hold: // // (1) The semiring is idempotent, // (2) the first FST is output-epsilon free, or // (3) the second FST is input-epsilon free. // // For (1), redundant epsilon paths may be created but won't hurt correctness. // For (2) and (3), no redundant paths are created. template <class M1, class M2 /* = M1 */> class TrivialComposeFilter { public: using Matcher1 = M1; using Matcher2 = M2; using FST1 = typename M1::FST; using FST2 = typename M2::FST; using FilterState = TrivialFilterState; using Arc = typename FST1::Arc; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; TrivialComposeFilter(const FST1 &fst1, const FST2 &fst2, Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr) : matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)), matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)), fst1_(matcher1_->GetFst()), fst2_(matcher2_->GetFst()) {} TrivialComposeFilter(const TrivialComposeFilter<Matcher1, Matcher2> &filter, bool safe = false) : matcher1_(filter.matcher1_->Copy(safe)), matcher2_(filter.matcher2_->Copy(safe)), fst1_(matcher1_->GetFst()), fst2_(matcher2_->GetFst()) {} FilterState Start() const { return FilterState(true); } void SetState(StateId, StateId, const FilterState &) {} FilterState FilterArc(Arc *, Arc *) const { return FilterState(true); } void FilterFinal(Weight *, Weight *) const {} Matcher1 *GetMatcher1() { return matcher1_.get(); } Matcher2 *GetMatcher2() { return matcher2_.get(); } uint64 Properties(uint64 props) const { return props; } private: std::unique_ptr<Matcher1> matcher1_; std::unique_ptr<Matcher2> matcher2_; const FST1 &fst1_; const FST2 &fst2_; }; // This filter requires epsilons on FST1 to be read before epsilons on FST2. template <class M1, class M2 /* = M1 */> class SequenceComposeFilter { public: using Matcher1 = M1; using Matcher2 = M2; using FST1 = typename M1::FST; using FST2 = typename M2::FST; using FilterState = CharFilterState; using Arc = typename FST1::Arc; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; SequenceComposeFilter(const FST1 &fst1, const FST2 &fst2, Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr) : matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)), matcher2_(matcher2 ? 
matcher2 : new Matcher2(fst2, MATCH_INPUT)), fst1_(matcher1_->GetFst()), s1_(kNoStateId), s2_(kNoStateId), fs_(kNoStateId) {} SequenceComposeFilter(const SequenceComposeFilter<Matcher1, Matcher2> &filter, bool safe = false) : matcher1_(filter.matcher1_->Copy(safe)), matcher2_(filter.matcher2_->Copy(safe)), fst1_(matcher1_->GetFst()), s1_(kNoStateId), s2_(kNoStateId), fs_(kNoStateId) {} FilterState Start() const { return FilterState(0); } void SetState(StateId s1, StateId s2, const FilterState &fs) { if (s1_ == s1 && s2_ == s2 && fs == fs_) return; s1_ = s1; s2_ = s2; fs_ = fs; const auto na1 = internal::NumArcs(fst1_, s1); const auto ne1 = internal::NumOutputEpsilons(fst1_, s1); const bool fin1 = internal::Final(fst1_, s1) != Weight::Zero(); alleps1_ = na1 == ne1 && !fin1; noeps1_ = ne1 == 0; } FilterState FilterArc(Arc *arc1, Arc *arc2) const { if (arc1->olabel == kNoLabel) { return alleps1_ ? FilterState::NoState() : noeps1_ ? FilterState(0) : FilterState(1); } else if (arc2->ilabel == kNoLabel) { return fs_ != FilterState(0) ? FilterState::NoState() : FilterState(0); } else { return arc1->olabel == 0 ? FilterState::NoState() : FilterState(0); } } void FilterFinal(Weight *, Weight *) const {} Matcher1 *GetMatcher1() { return matcher1_.get(); } Matcher2 *GetMatcher2() { return matcher2_.get(); } uint64 Properties(uint64 props) const { return props; } private: std::unique_ptr<Matcher1> matcher1_; std::unique_ptr<Matcher2> matcher2_; const FST1 &fst1_; StateId s1_; // Current fst1_ state. StateId s2_; // Current fst2_ state. FilterState fs_; // Current filter state. bool alleps1_; // Only epsilons (and non-final) leaving s1_? bool noeps1_; // No epsilons leaving s1_? }; // This filter requires epsilons on FST2 to be read before epsilons on FST1. template <class M1, class M2 /* = M1 */> class AltSequenceComposeFilter { public: using Matcher1 = M1; using Matcher2 = M2; using FST1 = typename M1::FST; using FST2 = typename M2::FST; using FilterState = CharFilterState; using Arc = typename FST1::Arc; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; AltSequenceComposeFilter(const FST1 &fst1, const FST2 &fst2, Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr) : matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)), matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)), fst2_(matcher2_->GetFst()), s1_(kNoStateId), s2_(kNoStateId), fs_(kNoStateId) {} AltSequenceComposeFilter( const AltSequenceComposeFilter<Matcher1, Matcher2> &filter, bool safe = false) : matcher1_(filter.matcher1_->Copy(safe)), matcher2_(filter.matcher2_->Copy(safe)), fst2_(matcher2_->GetFst()), s1_(kNoStateId), s2_(kNoStateId), fs_(kNoStateId) {} FilterState Start() const { return FilterState(0); } void SetState(StateId s1, StateId s2, const FilterState &fs) { if (s1_ == s1 && s2_ == s2 && fs == fs_) return; s1_ = s1; s2_ = s2; fs_ = fs; const auto na2 = internal::NumArcs(fst2_, s2); const auto ne2 = internal::NumInputEpsilons(fst2_, s2); const bool fin2 = internal::Final(fst2_, s2) != Weight::Zero(); alleps2_ = na2 == ne2 && !fin2; noeps2_ = ne2 == 0; } FilterState FilterArc(Arc *arc1, Arc *arc2) const { if (arc2->ilabel == kNoLabel) { return alleps2_ ? FilterState::NoState() : noeps2_ ? FilterState(0) : FilterState(1); } else if (arc1->olabel == kNoLabel) { return fs_ == FilterState(1) ? FilterState::NoState() : FilterState(0); } else { return arc1->olabel == 0 ? 
FilterState::NoState() : FilterState(0); } } void FilterFinal(Weight *, Weight *) const {} Matcher1 *GetMatcher1() { return matcher1_.get(); } Matcher2 *GetMatcher2() { return matcher2_.get(); } uint64 Properties(uint64 props) const { return props; } private: std::unique_ptr<Matcher1> matcher1_; std::unique_ptr<Matcher2> matcher2_; const FST2 &fst2_; StateId s1_; // Current fst1_ state. StateId s2_; // Current fst2_ state. FilterState fs_; // Current filter state. bool alleps2_; // Only epsilons (and non-final) leaving s2_? bool noeps2_; // No epsilons leaving s2_? }; // This filter requires epsilons on FST1 to be matched with epsilons on FST2 // whenever possible. (Template arg default declared in fst-decl.h.) template <class M1, class M2 /* = M1 */> class MatchComposeFilter { public: using Matcher1 = M1; using Matcher2 = M2; using FST1 = typename M1::FST; using FST2 = typename M2::FST; using FilterState = CharFilterState; using Arc = typename FST1::Arc; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; MatchComposeFilter(const FST1 &fst1, const FST2 &fst2, Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr) : matcher1_(matcher1 ? matcher1 : new Matcher1(fst1, MATCH_OUTPUT)), matcher2_(matcher2 ? matcher2 : new Matcher2(fst2, MATCH_INPUT)), fst1_(matcher1_->GetFst()), fst2_(matcher2_->GetFst()), s1_(kNoStateId), s2_(kNoStateId), fs_(kNoStateId) {} MatchComposeFilter(const MatchComposeFilter<Matcher1, Matcher2> &filter, bool safe = false) : matcher1_(filter.matcher1_->Copy(safe)), matcher2_(filter.matcher2_->Copy(safe)), fst1_(matcher1_->GetFst()), fst2_(matcher2_->GetFst()), s1_(kNoStateId), s2_(kNoStateId), fs_(kNoStateId) {} FilterState Start() const { return FilterState(0); } void SetState(StateId s1, StateId s2, const FilterState &fs) { if (s1_ == s1 && s2_ == s2 && fs == fs_) return; s1_ = s1; s2_ = s2; fs_ = fs; size_t na1 = internal::NumArcs(fst1_, s1); size_t ne1 = internal::NumOutputEpsilons(fst1_, s1); bool f1 = internal::Final(fst1_, s1) != Weight::Zero(); alleps1_ = na1 == ne1 && !f1; noeps1_ = ne1 == 0; size_t na2 = internal::NumArcs(fst2_, s2); size_t ne2 = internal::NumInputEpsilons(fst2_, s2); bool f2 = internal::Final(fst2_, s2) != Weight::Zero(); alleps2_ = na2 == ne2 && !f2; noeps2_ = ne2 == 0; } FilterState FilterArc(Arc *arc1, Arc *arc2) const { if (arc2->ilabel == kNoLabel) { // Epsilon in FST1. return fs_ == FilterState(0) ? (noeps2_ ? FilterState(0) : (alleps2_ ? FilterState::NoState() : FilterState(1))) : (fs_ == FilterState(1) ? FilterState(1) : FilterState::NoState()); } else if (arc1->olabel == kNoLabel) { // Epsilon in FST2. return fs_ == FilterState(0) ? (noeps1_ ? FilterState(0) : (alleps1_ ? FilterState::NoState() : FilterState(2))) : (fs_ == FilterState(2) ? FilterState(2) : FilterState::NoState()); } else if (arc1->olabel == 0) { // Epsilon in both. return fs_ == FilterState(0) ? FilterState(0) : FilterState::NoState(); } else { // Both are non-epsilons. return FilterState(0); } } void FilterFinal(Weight *, Weight *) const {} Matcher1 *GetMatcher1() { return matcher1_.get(); } Matcher2 *GetMatcher2() { return matcher2_.get(); } uint64 Properties(uint64 props) const { return props; } private: std::unique_ptr<Matcher1> matcher1_; std::unique_ptr<Matcher2> matcher2_; const FST1 &fst1_; const FST2 &fst2_; StateId s1_; // Current fst1_ state. StateId s2_; // Current fst2_ state. FilterState fs_; // Current filter state ID. bool alleps1_; // Only epsilons (and non-final) leaving s1? 
bool alleps2_; // Only epsilons (and non-final) leaving s2? bool noeps1_; // No epsilons leaving s1? bool noeps2_; // No epsilons leaving s2? }; // This filter works with the MultiEpsMatcher to determine if multi-epsilons are // preserved in the composition output (rather than rewritten as 0) and // ensures correct properties. template <class Filter> class MultiEpsFilter { public: using Matcher1 = typename Filter::Matcher1; using Matcher2 = typename Filter::Matcher2; using FST1 = typename Filter::FST1; using FST2 = typename Filter::FST2; using FilterState = typename Filter::FilterState; using Arc = typename Filter::Arc; using Label = typename Arc::Label; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; MultiEpsFilter(const FST1 &fst1, const FST2 &fst2, Matcher1 *matcher1 = nullptr, Matcher2 *matcher2 = nullptr, bool keep_multi_eps = false) : filter_(fst1, fst2, matcher1, matcher2), keep_multi_eps_(keep_multi_eps) {} MultiEpsFilter(const MultiEpsFilter<Filter> &filter, bool safe = false) : filter_(filter.filter_, safe), keep_multi_eps_(filter.keep_multi_eps_) {} FilterState Start() const { return filter_.Start(); } void SetState(StateId s1, StateId s2, const FilterState &fs) { return filter_.SetState(s1, s2, fs); } FilterState FilterArc(Arc *arc1, Arc *arc2) const { const auto fs = filter_.FilterArc(arc1, arc2); if (keep_multi_eps_) { if (arc1->olabel == kNoLabel) arc1->ilabel = arc2->ilabel; if (arc2->ilabel == kNoLabel) arc2->olabel = arc1->olabel; } return fs; } void FilterFinal(Weight *w1, Weight *w2) const { return filter_.FilterFinal(w1, w2); } Matcher1 *GetMatcher1() { return filter_.GetMatcher1(); } Matcher2 *GetMatcher2() { return filter_.GetMatcher2(); } uint64 Properties(uint64 iprops) const { const auto oprops = filter_.Properties(iprops); return oprops & kILabelInvariantProperties & kOLabelInvariantProperties; } private: Filter filter_; bool keep_multi_eps_; }; } // namespace fst #endif // FST_COMPOSE_FILTER_H_
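A sketch of plugging one of these filters into a delayed composition. `TrivialComposeFilter` is chosen here, which per the comments above is only safe when one side is epsilon-free or the semiring is idempotent; the input FSTs are placeholders and the usual label-sortedness requirement on one side still applies.

```cpp
#include <fst/fstlib.h>

void ComposeWithTrivialFilter(const fst::StdFst &a, const fst::StdFst &b,
                              fst::StdVectorFst *out) {
  using M = fst::Matcher<fst::StdFst>;
  fst::ComposeFstOptions<fst::StdArc, M, fst::TrivialComposeFilter<M>> opts;
  // Delayed composition; copying into the VectorFst forces expansion.
  *out = fst::ComposeFst<fst::StdArc>(a, b, opts);
}
```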
0
coqui_public_repos/STT
coqui_public_repos/STT/ci_scripts/all-vars.sh
#!/bin/bash set -xe ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." &> /dev/null && pwd) CI_TASK_DIR=${CI_TASK_DIR:-${ROOT_DIR}} export OS=$(uname) if [ "${OS}" = "Linux" ]; then export DS_ROOT_TASK=${CI_TASK_DIR} export PYENV_ROOT="${DS_ROOT_TASK}/pyenv-root" export DS_CPU_COUNT=$(nproc) fi; if [ "${OS}" = "${CI_MSYS_VERSION}" ]; then export CI_TASK_DIR="$(cygpath ${CI_TASK_DIR})" export DS_ROOT_TASK=${CI_TASK_DIR} export PYENV_ROOT="${CI_TASK_DIR}/pyenv-root" export PLATFORM_EXE_SUFFIX=.exe export DS_CPU_COUNT=$(nproc) # Those are the versions available on NuGet.org export SUPPORTED_PYTHON_VERSIONS="3.5.4:ucs2 3.6.8:ucs2 3.7.6:ucs2 3.8.1:ucs2 3.9.0:ucs2" fi; if [ "${OS}" = "Darwin" ]; then export DS_ROOT_TASK=${CI_TASK_DIR} export DS_CPU_COUNT=$(sysctl hw.ncpu |cut -d' ' -f2) export PYENV_ROOT="${DS_ROOT_TASK}/pyenv-root" fi export CI_ARTIFACTS_DIR=${CI_ARTIFACTS_DIR:-${CI_TASK_DIR}/artifacts} export CI_TMP_DIR=${CI_TMP_DIR:-/tmp} export ANDROID_TMP_DIR=/data/local/tmp mkdir -p ${CI_TMP_DIR} || true export DS_TFDIR=${DS_ROOT_TASK}/tensorflow export DS_DSDIR=${DS_ROOT_TASK}/ export DS_EXAMPLEDIR=${DS_ROOT_TASK}/examples export DS_VERSION="$(cat ${DS_DSDIR}/training/coqui_stt_training/VERSION)" export GRADLE_USER_HOME=${DS_ROOT_TASK}/gradle-cache export ANDROID_SDK_HOME=${DS_ROOT_TASK}/STT/Android/SDK/ export ANDROID_NDK_HOME=${DS_ROOT_TASK}/STT/Android/android-ndk-r19c/ WGET=${WGET:-"wget"} TAR=${TAR:-"tar"} XZ=${XZ:-"xz -9 -T0"} ZIP=${ZIP:-"zip"} UNXZ=${UNXZ:-"xz -T0 -d"} UNGZ=${UNGZ:-"gunzip"} if [ "${OS}" = "Darwin" ]; then TAR="gtar" fi if [ "${OS}" = "${CI_MSYS_VERSION}" ]; then WGET=/usr/bin/wget.exe TAR=/usr/bin/tar.exe XZ="xz -9 -T0 -c -" UNXZ="xz -9 -T0 -d" fi model_source="${STT_TEST_MODEL}" model_name="$(basename "${model_source}")" ldc93s1_sample_filename=''
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/types/base_class.hpp
/*! \file base_class.hpp \brief Support for base classes (virtual and non-virtual) \ingroup OtherTypes */ /* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CEREAL_TYPES_BASE_CLASS_HPP_ #define CEREAL_TYPES_BASE_CLASS_HPP_ #include "cereal/details/traits.hpp" #include "cereal/details/polymorphic_impl_fwd.hpp" namespace cereal { namespace base_class_detail { //! Used to register polymorphic relations and avoid the need to include //! polymorphic.hpp when no polymorphism is used /*! @internal */ template <class Base, class Derived, bool IsPolymorphic = std::is_polymorphic<Base>::value> struct RegisterPolymorphicBaseClass { static void bind() { } }; //! Polymorphic version /*! @internal */ template <class Base, class Derived> struct RegisterPolymorphicBaseClass<Base, Derived, true> { static void bind() { detail::RegisterPolymorphicCaster<Base, Derived>::bind(); } }; } //! Casts a derived class to its non-virtual base class in a way that safely supports abstract classes /*! This should be used in cases when a derived type needs to serialize its base type. This is better than directly using static_cast, as it allows for serialization of pure virtual (abstract) base classes. This also automatically registers polymorphic relation between the base and derived class, assuming they are indeed polymorphic. Note this is not the same as polymorphic type registration. For more information see the documentation on polymorphism. If using a polymorphic class, be sure to include support for polymorphism (cereal/types/polymorphic.hpp). 
\sa virtual_base_class @code{.cpp} struct MyBase { int x; virtual void foo() = 0; template <class Archive> void serialize( Archive & ar ) { ar( x ); } }; struct MyDerived : public MyBase //<-- Note non-virtual inheritance { int y; virtual void foo() {}; template <class Archive> void serialize( Archive & ar ) { ar( cereal::base_class<MyBase>(this) ); ar( y ); } }; @endcode */ template<class Base> struct base_class : private traits::detail::BaseCastBase { template<class Derived> base_class(Derived const * derived) : base_ptr(const_cast<Base*>(static_cast<Base const *>(derived))) { static_assert( std::is_base_of<Base, Derived>::value, "Can only use base_class on a valid base class" ); base_class_detail::RegisterPolymorphicBaseClass<Base, Derived>::bind(); } Base * base_ptr; }; //! Casts a derived class to its virtual base class in a way that allows cereal to track inheritance /*! This should be used in cases when a derived type features virtual inheritance from some base type. This allows cereal to track the inheritance and to avoid making duplicate copies during serialization. It is safe to use virtual_base_class in all circumstances for serializing base classes, even in cases where virtual inheritance does not take place, though it may be slightly faster to utilize cereal::base_class<> if you do not need to worry about virtual inheritance. This also automatically registers polymorphic relation between the base and derived class, assuming they are indeed polymorphic. Note this is not the same as polymorphic type registration. For more information see the documentation on polymorphism. If using a polymorphic class, be sure to include support for polymorphism (cereal/types/polymorphic.hpp). \sa base_class @code{.cpp} struct MyBase { int x; template <class Archive> void serialize( Archive & ar ) { ar( x ); } }; struct MyLeft : virtual MyBase //<-- Note the virtual inheritance { int y; template <class Archive> void serialize( Archive & ar ) { ar( cereal::virtual_base_class<MyBase>( this ) ); ar( y ); } }; struct MyRight : virtual MyBase { int z; template <class Archive> void serialize( Archive & ar ) { ar( cereal::virtual_base_class<MyBase>( this ) ); ar( z ); } }; // diamond virtual inheritance; contains one copy of each base class struct MyDerived : virtual MyLeft, virtual MyRight { int a; template <class Archive> void serialize( Archive & ar ) { ar( cereal::virtual_base_class<MyLeft>( this ) ); // safely serialize data members in MyLeft ar( cereal::virtual_base_class<MyRight>( this ) ); // safely serialize data members in MyRight ar( a ); // Because we used virtual_base_class, cereal will ensure that only one instance of MyBase is // serialized as we traverse the inheritance hierarchy. This means that there will be one copy // each of the variables x, y, z, and a // If we had chosen to use static_cast<> instead, cereal would perform no tracking and // assume that every base class should be serialized (in this case leading to a duplicate // serialization of MyBase due to diamond inheritance) } }; @endcode */ template<class Base> struct virtual_base_class : private traits::detail::BaseCastBase { template<class Derived> virtual_base_class(Derived const * derived) : base_ptr(const_cast<Base*>(static_cast<Base const *>(derived))) { static_assert( std::is_base_of<Base, Derived>::value, "Can only use virtual_base_class on a valid base class" ); base_class_detail::RegisterPolymorphicBaseClass<Base, Derived>::bind(); } Base * base_ptr; }; } // namespace cereal #endif // CEREAL_TYPES_BASE_CLASS_HPP_
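A self-contained round-trip sketch using `base_class` with cereal's binary archives; the types and values here are illustrative, not part of the header above.

```cpp
#include <sstream>

#include "cereal/archives/binary.hpp"
#include "cereal/types/base_class.hpp"

struct Shape {
  int sides = 0;
  template <class Archive> void serialize(Archive &ar) { ar(sides); }
};

struct Square : Shape {  // non-virtual inheritance
  int len = 0;
  template <class Archive> void serialize(Archive &ar) {
    ar(cereal::base_class<Shape>(this), len);  // Shape's members come along
  }
};

int main() {
  std::stringstream ss;
  {
    cereal::BinaryOutputArchive oar(ss);
    Square s; s.sides = 4; s.len = 3;
    oar(s);
  }  // archive flushed on destruction
  cereal::BinaryInputArchive iar(ss);
  Square t;
  iar(t);  // t.sides == 4, t.len == 3
  return 0;
}
```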
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/bin/fstdraw.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/flags.h> DEFINE_bool(acceptor, false, "Input in acceptor format"); DEFINE_string(isymbols, "", "Input label symbol table"); DEFINE_string(osymbols, "", "Output label symbol table"); DEFINE_string(ssymbols, "", "State label symbol table"); DEFINE_bool(numeric, false, "Print numeric labels"); DEFINE_int32(precision, 5, "Set precision (number of char/float)"); DEFINE_string(float_format, "g", "Floating-point format, one of: \"e\", \"f\", or \"g\""); DEFINE_bool(show_weight_one, false, "Print/draw arc weights and final weights equal to Weight::One()"); DEFINE_string(title, "", "Set figure title"); DEFINE_bool(portrait, false, "Portrait mode (def: landscape)"); DEFINE_bool(vertical, false, "Draw bottom-to-top instead of left-to-right"); DEFINE_int32(fontsize, 14, "Set fontsize"); DEFINE_double(height, 11, "Set height"); DEFINE_double(width, 8.5, "Set width"); DEFINE_double(nodesep, 0.25, "Set minimum separation between nodes (see dot documentation)"); DEFINE_double(ranksep, 0.40, "Set minimum separation between ranks (see dot documentation)"); DEFINE_bool(allow_negative_labels, false, "Allow negative labels (not recommended; may cause conflicts)"); int fstdraw_main(int argc, char **argv); int main(int argc, char **argv) { return fstdraw_main(argc, argv); }
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/intersect.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Class to compute the intersection of two FSAs. #ifndef FST_INTERSECT_H_ #define FST_INTERSECT_H_ #include <algorithm> #include <vector> #include <fst/log.h> #include <fst/cache.h> #include <fst/compose.h> namespace fst { using IntersectOptions = ComposeOptions; template <class Arc, class M = Matcher<Fst<Arc>>, class Filter = SequenceComposeFilter<M>, class StateTable = GenericComposeStateTable<Arc, typename Filter::FilterState>> struct IntersectFstOptions : public ComposeFstOptions<Arc, M, Filter, StateTable> { IntersectFstOptions() {} explicit IntersectFstOptions(const CacheOptions &opts, M *matcher1 = nullptr, M *matcher2 = nullptr, Filter *filter = nullptr, StateTable *state_table = nullptr) : ComposeFstOptions<Arc, M, Filter, StateTable>(opts, matcher1, matcher2, filter, state_table) {} }; // Computes the intersection (Hadamard product) of two FSAs. This version is a // delayed FST. Only strings that are in both automata are retained in the // result. // // The two arguments must be acceptors. One of the arguments must be // label-sorted. // // Complexity: same as ComposeFst. // // Caveats: same as ComposeFst. template <class A> class IntersectFst : public ComposeFst<A> { public: using Arc = A; using StateId = typename Arc::StateId; using Weight = typename Arc::Weight; using ComposeFst<A>::CreateBase; using ComposeFst<A>::CreateBase1; using ComposeFst<A>::Properties; IntersectFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2, const CacheOptions &opts = CacheOptions()) : ComposeFst<Arc>(CreateBase(fst1, fst2, opts)) { const bool acceptors = fst1.Properties(kAcceptor, true) && fst2.Properties(kAcceptor, true); if (!acceptors) { FSTERROR() << "IntersectFst: Input FSTs are not acceptors"; GetMutableImpl()->SetProperties(kError); } } template <class M, class Filter, class StateTable> IntersectFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2, const IntersectFstOptions<Arc, M, Filter, StateTable> &opts) : ComposeFst<Arc>(CreateBase1(fst1, fst2, opts)) { const bool acceptors = fst1.Properties(kAcceptor, true) && fst2.Properties(kAcceptor, true); if (!acceptors) { FSTERROR() << "IntersectFst: input FSTs are not acceptors"; GetMutableImpl()->SetProperties(kError); } } // See Fst<>::Copy() for doc. IntersectFst(const IntersectFst<Arc> &fst, bool safe = false) : ComposeFst<Arc>(fst, safe) {} // Get a copy of this IntersectFst. See Fst<>::Copy() for further doc. IntersectFst<Arc> *Copy(bool safe = false) const override { return new IntersectFst<Arc>(*this, safe); } private: using ImplToFst<internal::ComposeFstImplBase<A>>::GetImpl; using ImplToFst<internal::ComposeFstImplBase<A>>::GetMutableImpl; }; // Specialization for IntersectFst. template <class Arc> class StateIterator<IntersectFst<Arc>> : public StateIterator<ComposeFst<Arc>> { public: explicit StateIterator(const IntersectFst<Arc> &fst) : StateIterator<ComposeFst<Arc>>(fst) {} }; // Specialization for IntersectFst. template <class Arc> class ArcIterator<IntersectFst<Arc>> : public ArcIterator<ComposeFst<Arc>> { public: using StateId = typename Arc::StateId; ArcIterator(const IntersectFst<Arc> &fst, StateId s) : ArcIterator<ComposeFst<Arc>>(fst, s) {} }; // Useful alias when using StdArc. using StdIntersectFst = IntersectFst<StdArc>; // Computes the intersection (Hadamard product) of two FSAs. This version // writes the intersection to an output MutableFst. Only strings that are in // both automata are retained in the result. 
// // The two arguments must be acceptors. One of the arguments must be // label-sorted. // // Complexity: same as Compose. // // Caveats: same as Compose. template <class Arc> void Intersect(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2, MutableFst<Arc> *ofst, const IntersectOptions &opts = IntersectOptions()) { using M = Matcher<Fst<Arc>>; if (opts.filter_type == AUTO_FILTER) { CacheOptions nopts; nopts.gc_limit = 0; // Cache only the last state for fastest copy. *ofst = IntersectFst<Arc>(ifst1, ifst2, nopts); } else if (opts.filter_type == SEQUENCE_FILTER) { IntersectFstOptions<Arc> iopts; iopts.gc_limit = 0; // Cache only the last state for fastest copy. *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts); } else if (opts.filter_type == ALT_SEQUENCE_FILTER) { IntersectFstOptions<Arc, M, AltSequenceComposeFilter<M>> iopts; iopts.gc_limit = 0; // Cache only the last state for fastest copy. *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts); } else if (opts.filter_type == MATCH_FILTER) { IntersectFstOptions<Arc, M, MatchComposeFilter<M>> iopts; iopts.gc_limit = 0; // Cache only the last state for fastest copy. *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts); } if (opts.connect) Connect(ofst); } } // namespace fst #endif // FST_INTERSECT_H_
0
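The `Intersect()` entry point above requires two acceptors, at least one of them label-sorted. A minimal usage sketch; the labels, weights, and the choice of which side to sort are illustrative, not taken from any shipped model:

// Minimal usage sketch for fst::Intersect.
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst a, b, out;

  // Acceptor A: accepts the single string "1 2".
  a.AddState(); a.AddState(); a.AddState();
  a.SetStart(0);
  a.AddArc(0, fst::StdArc(1, 1, 0.5, 1));
  a.AddArc(1, fst::StdArc(2, 2, 0.5, 2));
  a.SetFinal(2, fst::StdArc::Weight::One());

  // Acceptor B: accepts "1 2" and "1 3".
  b.AddState(); b.AddState(); b.AddState();
  b.SetStart(0);
  b.AddArc(0, fst::StdArc(1, 1, 0.0, 1));
  b.AddArc(1, fst::StdArc(2, 2, 1.0, 2));
  b.AddArc(1, fst::StdArc(3, 3, 1.0, 2));
  b.SetFinal(2, fst::StdArc::Weight::One());

  // One argument must be label-sorted; sort B on its input labels.
  fst::ArcSort(&b, fst::ILabelCompare<fst::StdArc>());

  // out accepts only "1 2"; weights add along matched arcs in the tropical
  // semiring (here 0.5 + 0.5 + 0.0 + 1.0 = 2.0).
  fst::Intersect(a, b, &out);
  return 0;
}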
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/ngram/nthbit.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.

#ifndef FST_EXTENSIONS_NGRAM_NTHBIT_H_
#define FST_EXTENSIONS_NGRAM_NTHBIT_H_

#include <fst/types.h>
#include <fst/compat.h>

// Lookup table defined in the ngram extension library: entry i packs, four
// bits per rank, the positions of the set bits of the byte value i.
extern uint32_t nth_bit_bit_offset[];

// Returns the 0-based position of the r-th (1-based) set bit of v. The
// position is found by a branchless binary search: each step counts the set
// bits in the low half of the remaining window with popcount and, when r
// exceeds that count, moves the window to the high half and discounts r
// accordingly. The final byte is resolved through the lookup table above.
// Behavior is undefined if v has fewer than r set bits.
inline uint32_t nth_bit(uint64_t v, uint32_t r) {
  uint32_t shift = 0;
  // Narrow to the 32-bit half containing the r-th set bit.
  uint32_t c = __builtin_popcount(v & 0xffffffff);
  uint32_t mask = -(r > c);  // All-ones iff the bit lies in the high half.
  r -= c & mask;
  shift += (32 & mask);

  // Narrow to a 16-bit quarter.
  c = __builtin_popcount((v >> shift) & 0xffff);
  mask = -(r > c);
  r -= c & mask;
  shift += (16 & mask);

  // Narrow to a single byte.
  c = __builtin_popcount((v >> shift) & 0xff);
  mask = -(r > c);
  r -= c & mask;
  shift += (8 & mask);

  // Read the in-byte offset of the r-th set bit from the table.
  return shift + ((nth_bit_bit_offset[(v >> shift) & 0xff] >> ((r - 1) << 2)) &
                  0xf);
}

#endif  // FST_EXTENSIONS_NGRAM_NTHBIT_H_
0
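`nth_bit()` depends on the `nth_bit_bit_offset` table defined elsewhere in the ngram extension, so it is not self-contained. Its contract can be stated with a naive reference version; a sketch, useful as a cross-check in tests (the name `nth_bit_ref` is ours, not part of the library):

// Reference semantics for nth_bit(v, r): the 0-based position of the r-th
// (1-based) set bit of v, computed with a plain loop instead of the
// branchless table-driven search.
#include <cassert>
#include <cstdint>

inline uint32_t nth_bit_ref(uint64_t v, uint32_t r) {
  for (uint32_t pos = 0; pos < 64; ++pos) {
    if ((v >> pos) & 1) {
      if (--r == 0) return pos;
    }
  }
  return 64;  // Fewer than r set bits; the fast version assumes r is valid.
}

int main() {
  // 0x16 == 0b10110: set bits at positions 1, 2, and 4.
  assert(nth_bit_ref(0x16, 1) == 1);
  assert(nth_bit_ref(0x16, 2) == 2);
  assert(nth_bit_ref(0x16, 3) == 4);
  return 0;
}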
coqui_public_repos/STT/data
coqui_public_repos/STT/data/smoke_test/ldc93s1.csv
wav_filename,wav_filesize,transcript LDC93S1.wav,93638,she had your dark suit in greasy wash water all year
0
coqui_public_repos/TTS/TTS/vocoder
coqui_public_repos/TTS/TTS/vocoder/models/univnet_discriminator.py
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils import spectral_norm
from torch.nn.utils.parametrizations import weight_norm

from TTS.utils.audio.torch_transforms import TorchSTFT
from TTS.vocoder.models.hifigan_discriminator import MultiPeriodDiscriminator

LRELU_SLOPE = 0.1


class SpecDiscriminator(nn.Module):
    """Spectrogram discriminator operating on the STFT magnitude of the input waveform."""

    def __init__(self, fft_size=1024, hop_length=120, win_length=600, window="hann_window", use_spectral_norm=False):
        super().__init__()
        norm_f = weight_norm if use_spectral_norm is False else spectral_norm
        self.fft_size = fft_size
        self.hop_length = hop_length
        self.win_length = win_length
        # `window` names the torch window function used by the STFT. Without
        # this parameter, callers passing the window name positionally (as
        # MultiResSpecDiscriminator does below) would bind it to
        # `use_spectral_norm`, silently switching the normalization.
        self.stft = TorchSTFT(fft_size, hop_length, win_length, window=window)
        self.discriminators = nn.ModuleList(
            [
                norm_f(nn.Conv2d(1, 32, kernel_size=(3, 9), padding=(1, 4))),
                norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))),
            ]
        )

        self.out = norm_f(nn.Conv2d(32, 1, 3, 1, 1))

    def forward(self, y):
        fmap = []
        with torch.no_grad():
            y = y.squeeze(1)
            y = self.stft(y)
        y = y.unsqueeze(1)
        for _, d in enumerate(self.discriminators):
            y = d(y)
            y = F.leaky_relu(y, LRELU_SLOPE)
            fmap.append(y)

        y = self.out(y)
        fmap.append(y)

        return torch.flatten(y, 1, -1), fmap


class MultiResSpecDiscriminator(torch.nn.Module):
    def __init__(  # pylint: disable=dangerous-default-value
        self, fft_sizes=[1024, 2048, 512], hop_sizes=[120, 240, 50], win_lengths=[600, 1200, 240], window="hann_window"
    ):
        super().__init__()
        self.discriminators = nn.ModuleList(
            [
                SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),
                SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),
                SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window),
            ]
        )

    def forward(self, x):
        scores = []
        feats = []
        for d in self.discriminators:
            score, feat = d(x)
            scores.append(score)
            feats.append(feat)

        return scores, feats


class UnivnetDiscriminator(nn.Module):
    """Univnet discriminator wrapping MPD and MSD."""

    def __init__(self):
        super().__init__()
        self.mpd = MultiPeriodDiscriminator()
        self.msd = MultiResSpecDiscriminator()

    def forward(self, x):
        """
        Args:
            x (Tensor): input waveform.

        Returns:
            List[Tensor]: discriminator scores.
            List[List[Tensor]]: list of list of features from each layers of each discriminator.
        """
        scores, feats = self.mpd(x)
        scores_, feats_ = self.msd(x)
        return scores + scores_, feats + feats_
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/bin/fstprint.cc
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #include <fst/flags.h> DEFINE_bool(acceptor, false, "Input in acceptor format?"); DEFINE_string(isymbols, "", "Input label symbol table"); DEFINE_string(osymbols, "", "Output label symbol table"); DEFINE_string(ssymbols, "", "State label symbol table"); DEFINE_bool(numeric, false, "Print numeric labels?"); DEFINE_string(save_isymbols, "", "Save input symbol table to file"); DEFINE_string(save_osymbols, "", "Save output symbol table to file"); DEFINE_bool(show_weight_one, false, "Print/draw arc weights and final weights equal to semiring One?"); DEFINE_bool(allow_negative_labels, false, "Allow negative labels (not recommended; may cause conflicts)?"); DEFINE_string(missing_symbol, "", "Symbol to print when lookup fails (default raises error)"); int fstprint_main(int argc, char **argv); int main(int argc, char **argv) { return fstprint_main(argc, argv); }
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/script/randgen.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_RANDGEN_H_ #define FST_SCRIPT_RANDGEN_H_ #include <ctime> #include <tuple> #include <fst/randgen.h> #include <fst/script/fst-class.h> #include <fst/script/script-impl.h> namespace fst { namespace script { using RandGenArgs = std::tuple<const FstClass &, MutableFstClass *, time_t, const RandGenOptions<RandArcSelection> &>; template <class Arc> void RandGen(RandGenArgs *args) { const Fst<Arc> &ifst = *(std::get<0>(*args).GetFst<Arc>()); MutableFst<Arc> *ofst = std::get<1>(*args)->GetMutableFst<Arc>(); const time_t seed = std::get<2>(*args); const auto &opts = std::get<3>(*args); switch (opts.selector) { case UNIFORM_ARC_SELECTOR: { const UniformArcSelector<Arc> selector(seed); const RandGenOptions<UniformArcSelector<Arc>> ropts( selector, opts.max_length, opts.npath, opts.weighted, opts.remove_total_weight); RandGen(ifst, ofst, ropts); return; } case FAST_LOG_PROB_ARC_SELECTOR: { const FastLogProbArcSelector<Arc> selector(seed); const RandGenOptions<FastLogProbArcSelector<Arc>> ropts( selector, opts.max_length, opts.npath, opts.weighted, opts.remove_total_weight); RandGen(ifst, ofst, ropts); return; } case LOG_PROB_ARC_SELECTOR: { const LogProbArcSelector<Arc> selector(seed); const RandGenOptions<LogProbArcSelector<Arc>> ropts( selector, opts.max_length, opts.npath, opts.weighted, opts.remove_total_weight); RandGen(ifst, ofst, ropts); return; } } } void RandGen(const FstClass &ifst, MutableFstClass *ofst, time_t seed = time(nullptr), const RandGenOptions<RandArcSelection> &opts = RandGenOptions<RandArcSelection>(UNIFORM_ARC_SELECTOR)); } // namespace script } // namespace fst #endif // FST_SCRIPT_RANDGEN_H_
0
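The script-level `RandGen` above dispatches on the selector enum; the underlying arc-level API from `<fst/randgen.h>` can also be called directly. A minimal sketch of the `UNIFORM_ARC_SELECTOR` branch; the toy FST and the path count are illustrative:

// Draws random paths from an FST with uniform arc selection, as in the
// UNIFORM_ARC_SELECTOR case of the script wrapper.
#include <ctime>
#include <limits>
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst model, paths;
  model.AddState(); model.AddState();
  model.SetStart(0);
  model.AddArc(0, fst::StdArc(1, 1, 1.0, 1));
  model.AddArc(0, fst::StdArc(2, 2, 1.0, 1));
  model.SetFinal(1, fst::StdArc::Weight::One());

  // Sample 5 paths, choosing uniformly among the arcs leaving each state.
  const fst::UniformArcSelector<fst::StdArc> selector(time(nullptr));
  const fst::RandGenOptions<fst::UniformArcSelector<fst::StdArc>> opts(
      selector, std::numeric_limits<int32_t>::max(), /*npath=*/5);
  fst::RandGen(model, &paths, opts);
  return 0;
}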
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/script/encode.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_SCRIPT_ENCODE_H_ #define FST_SCRIPT_ENCODE_H_ #include <memory> #include <string> #include <tuple> #include <utility> #include <fst/encode.h> #include <fst/script/encodemapper-class.h> #include <fst/script/fst-class.h> namespace fst { namespace script { using EncodeArgs1 = std::tuple<MutableFstClass *, uint32_t, bool, const string &>; template <class Arc> void Encode(EncodeArgs1 *args) { MutableFst<Arc> *fst = std::get<0>(*args)->GetMutableFst<Arc>(); const string &coder_fname = std::get<3>(*args); // If true, reuse encode from disk. If false, make a new encoder and just use // the filename argument as the destination state. std::unique_ptr<EncodeMapper<Arc>> encoder( std::get<2>(*args) ? EncodeMapper<Arc>::Read(coder_fname, ENCODE) : new EncodeMapper<Arc>(std::get<1>(*args), ENCODE)); Encode(fst, encoder.get()); if (!std::get<2>(*args)) encoder->Write(coder_fname); } using EncodeArgs2 = std::pair<MutableFstClass *, EncodeMapperClass *>; template <class Arc> void Encode(EncodeArgs2 *args) { MutableFst<Arc> *fst = std::get<0>(*args)->GetMutableFst<Arc>(); EncodeMapper<Arc> *encoder = std::get<1>(*args)->GetEncodeMapper<Arc>(); Encode(fst, encoder); } void Encode(MutableFstClass *fst, uint32_t flags, bool reuse_encoder, const string &coder_fname); void Encode(MutableFstClass *fst, EncodeMapperClass *encoder); } // namespace script } // namespace fst #endif // FST_SCRIPT_ENCODE_H_
0
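The script-level `Encode` wraps the arc-level `EncodeMapper` API. A minimal sketch of the encode/decode round trip it performs; the flag combination and the toy FST are illustrative:

// Encodes (label, weight) pairs into single labels and restores them.
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst f;
  f.AddState(); f.AddState();
  f.SetStart(0);
  f.AddArc(0, fst::StdArc(1, 2, 0.5, 1));
  f.SetFinal(1, fst::StdArc::Weight::One());

  // kEncodeLabels | kEncodeWeights mirrors the flags value threaded through
  // the script layer above.
  fst::EncodeMapper<fst::StdArc> encoder(
      fst::kEncodeLabels | fst::kEncodeWeights, fst::ENCODE);
  fst::Encode(&f, &encoder);

  // ... run algorithms that require an unweighted acceptor, then restore:
  fst::Decode(&f, encoder);
  return 0;
}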
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/intersect.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to compute the intersection of two FSAs.

#ifndef FST_INTERSECT_H_
#define FST_INTERSECT_H_

#include <algorithm>
#include <vector>

#include <fst/log.h>

#include <fst/cache.h>
#include <fst/compose.h>

namespace fst {

using IntersectOptions = ComposeOptions;

template <class Arc, class M = Matcher<Fst<Arc>>,
          class Filter = SequenceComposeFilter<M>,
          class StateTable =
              GenericComposeStateTable<Arc, typename Filter::FilterState>>
struct IntersectFstOptions
    : public ComposeFstOptions<Arc, M, Filter, StateTable> {
  IntersectFstOptions() {}

  explicit IntersectFstOptions(const CacheOptions &opts, M *matcher1 = nullptr,
                               M *matcher2 = nullptr, Filter *filter = nullptr,
                               StateTable *state_table = nullptr)
      : ComposeFstOptions<Arc, M, Filter, StateTable>(opts, matcher1, matcher2,
                                                      filter, state_table) {}
};

// Computes the intersection (Hadamard product) of two FSAs. This version is a
// delayed FST. Only strings that are in both automata are retained in the
// result.
//
// The two arguments must be acceptors. One of the arguments must be
// label-sorted.
//
// Complexity: same as ComposeFst.
//
// Caveats: same as ComposeFst.
template <class A>
class IntersectFst : public ComposeFst<A> {
 public:
  using Arc = A;
  using StateId = typename Arc::StateId;
  using Weight = typename Arc::Weight;

  using ComposeFst<A>::CreateBase;
  using ComposeFst<A>::CreateBase1;
  using ComposeFst<A>::Properties;

  IntersectFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
               const CacheOptions &opts = CacheOptions())
      : ComposeFst<Arc>(CreateBase(fst1, fst2, opts)) {
    const bool acceptors =
        fst1.Properties(kAcceptor, true) && fst2.Properties(kAcceptor, true);
    if (!acceptors) {
      FSTERROR() << "IntersectFst: Input FSTs are not acceptors";
      GetMutableImpl()->SetProperties(kError);
    }
  }

  template <class M, class Filter, class StateTable>
  IntersectFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
               const IntersectFstOptions<Arc, M, Filter, StateTable> &opts)
      : ComposeFst<Arc>(CreateBase1(fst1, fst2, opts)) {
    const bool acceptors =
        fst1.Properties(kAcceptor, true) && fst2.Properties(kAcceptor, true);
    if (!acceptors) {
      FSTERROR() << "IntersectFst: Input FSTs are not acceptors";
      GetMutableImpl()->SetProperties(kError);
    }
  }

  // See Fst<>::Copy() for doc.
  IntersectFst(const IntersectFst<Arc> &fst, bool safe = false)
      : ComposeFst<Arc>(fst, safe) {}

  // Get a copy of this IntersectFst. See Fst<>::Copy() for further doc.
  IntersectFst<Arc> *Copy(bool safe = false) const override {
    return new IntersectFst<Arc>(*this, safe);
  }

 private:
  using ImplToFst<internal::ComposeFstImplBase<A>>::GetImpl;
  using ImplToFst<internal::ComposeFstImplBase<A>>::GetMutableImpl;
};

// Specialization for IntersectFst.
template <class Arc>
class StateIterator<IntersectFst<Arc>>
    : public StateIterator<ComposeFst<Arc>> {
 public:
  explicit StateIterator(const IntersectFst<Arc> &fst)
      : StateIterator<ComposeFst<Arc>>(fst) {}
};

// Specialization for IntersectFst.
template <class Arc>
class ArcIterator<IntersectFst<Arc>> : public ArcIterator<ComposeFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;

  ArcIterator(const IntersectFst<Arc> &fst, StateId s)
      : ArcIterator<ComposeFst<Arc>>(fst, s) {}
};

// Useful alias when using StdArc.
using StdIntersectFst = IntersectFst<StdArc>;

// Computes the intersection (Hadamard product) of two FSAs. This version
// writes the intersection to an output MutableFst. Only strings that are in
// both automata are retained in the result.
//
// The two arguments must be acceptors. One of the arguments must be
// label-sorted.
//
// Complexity: same as Compose.
//
// Caveats: same as Compose.
template <class Arc>
void Intersect(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2,
               MutableFst<Arc> *ofst,
               const IntersectOptions &opts = IntersectOptions()) {
  using M = Matcher<Fst<Arc>>;
  if (opts.filter_type == AUTO_FILTER) {
    CacheOptions nopts;
    nopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, nopts);
  } else if (opts.filter_type == SEQUENCE_FILTER) {
    IntersectFstOptions<Arc> iopts;
    iopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts);
  } else if (opts.filter_type == ALT_SEQUENCE_FILTER) {
    IntersectFstOptions<Arc, M, AltSequenceComposeFilter<M>> iopts;
    iopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts);
  } else if (opts.filter_type == MATCH_FILTER) {
    IntersectFstOptions<Arc, M, MatchComposeFilter<M>> iopts;
    iopts.gc_limit = 0;  // Cache only the last state for fastest copy.
    *ofst = IntersectFst<Arc>(ifst1, ifst2, iopts);
  }
  if (opts.connect) Connect(ofst);
}

}  // namespace fst

#endif  // FST_INTERSECT_H_
0
coqui_public_repos/inference-engine/src
coqui_public_repos/inference-engine/src/ctcdecode/build_archive.py
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

import glob
import os
import shlex
import subprocess
import sys
from multiprocessing.dummy import Pool

if sys.platform.startswith("win"):
    ARGS = ["/nologo", "/D KENLM_MAX_ORDER=6", "/EHsc", "/source-charset:utf-8"]
    OPT_ARGS = ["/O2", "/MT", "/D NDEBUG"]
    DBG_ARGS = ["/Od", "/MTd", "/Zi", "/U NDEBUG", "/D DEBUG"]
    OPENFST_DIR = "third_party/openfst-1.6.9-win"
else:
    ARGS = [
        "-fPIC",
        "-DKENLM_MAX_ORDER=6",
        "-std=c++11",
        "-Wno-unused-local-typedefs",
        "-Wno-sign-compare",
    ]
    OPT_ARGS = ["-O3", "-DNDEBUG"]
    DBG_ARGS = ["-O0", "-g", "-UNDEBUG", "-DDEBUG"]
    OPENFST_DIR = "third_party/openfst-1.6.7"


INCLUDES = [
    "..",
    "../kenlm",
    OPENFST_DIR + "/src/include",
    "third_party/ThreadPool",
    "third_party/object_pool",
]

KENLM_FILES = (
    glob.glob("../kenlm/util/*.cc")
    + glob.glob("../kenlm/lm/*.cc")
    + glob.glob("../kenlm/util/double-conversion/*.cc")
)

KENLM_FILES += glob.glob(OPENFST_DIR + "/src/lib/*.cc")

KENLM_FILES = [
    fn
    for fn in KENLM_FILES
    if not (
        fn.endswith("main.cc") or fn.endswith("test.cc") or fn.endswith("unittest.cc")
    )
]

CTC_DECODER_FILES = [
    "ctc_beam_search_decoder.cpp",
    "scorer.cpp",
    "path_trie.cpp",
    "decoder_utils.cpp",
    "workspace_status.cc",
    "../alphabet.cc",
]


def build_archive(
    srcs=[], out_name="", build_dir="temp_build/temp_build", debug=False, num_parallel=1
):
    compiler = os.environ.get("CXX", "g++")
    if sys.platform.startswith("win"):
        compiler = '"{}"'.format(compiler)
    ar = os.environ.get("AR", "ar")
    libexe = os.environ.get("LIBEXE", "lib.exe")
    libtool = os.environ.get("LIBTOOL", "libtool")
    cflags = os.environ.get("CFLAGS", "") + os.environ.get("CXXFLAGS", "")
    args = ARGS + (DBG_ARGS if debug else OPT_ARGS)

    for file in srcs:
        outfile = os.path.join(build_dir, os.path.splitext(file)[0] + ".o")
        outdir = os.path.dirname(outfile)
        if not os.path.exists(outdir):
            print("mkdir", outdir)
            os.makedirs(outdir)

    def build_one(file):
        outfile = os.path.join(build_dir, os.path.splitext(file)[0] + ".o")
        if os.path.exists(outfile):
            # Already built; still return the path so it is archived below.
            return outfile

        if sys.platform.startswith("win"):
            file = '"{}"'.format(file.replace("\\", "/"))
            output = '/Fo"{}"'.format(outfile.replace("\\", "/"))
        else:
            output = "-o " + outfile

        cmd = "{cc} -c {cflags} {args} {includes} {infile} {output}".format(
            cc=compiler,
            cflags=cflags,
            args=" ".join(args),
            includes=" ".join("-I" + i for i in INCLUDES),
            infile=file,
            output=output,
        )
        print(cmd)
        subprocess.check_call(shlex.split(cmd))
        return outfile

    pool = Pool(num_parallel)
    obj_files = list(pool.imap_unordered(build_one, srcs))

    if sys.platform.startswith("darwin"):
        cmd = "{libtool} -static -o {outfile} {infiles}".format(
            libtool=libtool,
            outfile=out_name,
            infiles=" ".join(obj_files),
        )
        print(cmd)
        subprocess.check_call(shlex.split(cmd))
    elif sys.platform.startswith("win"):
        cmd = '"{libexe}" /OUT:"{outfile}" {infiles} /MACHINE:X64 /NOLOGO'.format(
            libexe=libexe, outfile=out_name, infiles=" ".join(obj_files)
        )
        cmd = cmd.replace("\\", "/")
        print(cmd)
        subprocess.check_call(shlex.split(cmd))
    else:
        cmd = "{ar} rcs {outfile} {infiles}".format(
            ar=ar, outfile=out_name, infiles=" ".join(obj_files)
        )
        print(cmd)
        subprocess.check_call(shlex.split(cmd))


def build_common(
    out_name="common.a", build_dir="temp_build/temp_build", debug=False, num_parallel=1
):
    # Entry point referenced below: builds the KenLM/OpenFst objects into a
    # single archive. NOTE: the default output name "common.a" is an
    # assumption inferred from this function's name, not mandated by callers.
    build_archive(
        srcs=KENLM_FILES,
        out_name=out_name,
        build_dir=build_dir,
        debug=debug,
        num_parallel=num_parallel,
    )


if __name__ == "__main__":
    build_common()
0
coqui_public_repos/snakepit/src
coqui_public_repos/snakepit/src/utils/scripts.js
const fs = require('fs')
const path = require('path')
const stream = require('stream')
const { spawn } = require('child_process')

var exports = module.exports = {}

function shellQuote (str) {
    str = '' + str
    str = str.replace(/\\/g, '\\\\')
    str = str.replace(/\'/g, '\\\'')
    str = str.replace(/(?:\r\n|\r|\n)/g, '\\n')
    str = '$\'' + str + '\''
    return str
}
exports.shellQuote = shellQuote

exports.envToScript = function (env, doExport) {
    let envScript = []
    for (let name of Object.keys(env)) {
        envScript.push((doExport ? 'export ' : '') + name + '=' + shellQuote(env[name]) + '\n')
    }
    return envScript.join('')
}

var _loadedScripts = {}

const _includePrefix = '#INCLUDE '

function _getScript (scriptName, alreadyIncluded) {
    if (alreadyIncluded.hasOwnProperty(scriptName)) {
        return ''
    }
    if (_loadedScripts.hasOwnProperty(scriptName)) {
        return _loadedScripts[scriptName]
    }
    let scriptPath = path.join(__dirname, '..', '..', 'scripts', scriptName)
    let script = fs.readFileSync(scriptPath).toString()
    alreadyIncluded[scriptName] = true
    script = script
        .split('\n')
        .map(
            l => l.startsWith(_includePrefix) ?
                _getScript(l.substring(_includePrefix.length), alreadyIncluded) :
                l
        )
        .join('\n')
    return _loadedScripts[scriptName] = script
}

exports.getScript = function(scriptName) {
    return _getScript(scriptName, {})
}

exports.runScript = function(scriptName, env, callback) {
    if (typeof env == 'function') {
        callback = env
        env = {}
    }
    env = env || {}
    let script = _getScript(scriptName, {})
    let p = spawn('bash', ['-s'])
    // Accumulate raw output chunks; chunk boundaries are not line boundaries,
    // so they are concatenated without a separator.
    let stdout = []
    p.stdout.on('data', data => stdout.push(data))
    let stderr = []
    p.stderr.on('data', data => stderr.push(data))
    let called = false
    let callCallback = code => {
        if (!called) {
            called = true
            callback(code, stdout.join(''), stderr.join(''))
        }
    }
    p.on('close', code => callCallback(code))
    p.on('error', err => callCallback(128))
    p.on('exit', code => callCallback(code || 0))
    var stdinStream = new stream.Readable()
    Object.keys(env).forEach(name => stdinStream.push(
        'export ' + name + '=' + exports.shellQuote(env[name]) + '\n')
    )
    stdinStream.push(script + '\n')
    stdinStream.push(null)
    stdinStream.pipe(p.stdin)
    return p
}
0
coqui_public_repos/STT-models/odia/itml
coqui_public_repos/STT-models/odia/itml/v0.1.0/alphabet.txt
ଁ ଂ ଃ ଅ ଆ ଇ ଈ ଉ ଊ ଋ ଌ ଏ ଐ ଓ ଔ କ ଖ ଗ ଘ ଙ ଚ ଛ ଜ ଝ ଞ ଟ ଠ ଡ ଢ ଣ ତ ଥ ଦ ଧ ନ ପ ଫ ବ ଭ ମ ଯ ର ଲ ଳ ଵ ଶ ଷ ସ ହ ଼ ଽ ା ି ୀ ୁ ୂ ୃ ୄ େ ୈ ୋ ୌ ୍ ୕ ୖ ୗ ଡ଼ ଢ଼ ୟ ୠ ୡ ୢ ୣ ୰ ୱ ୲ ୳ ୴ ୵ ୶ ୷
0
coqui_public_repos/STT/native_client/kenlm/lm
coqui_public_repos/STT/native_client/kenlm/lm/interpolate/split_worker.cc
#include "split_worker.hh" #include "../common/ngram.hh" namespace lm { namespace interpolate { SplitWorker::SplitWorker(std::size_t order, util::stream::Chain &backoff_chain, util::stream::Chain &sort_chain) : order_(order) { backoff_chain >> backoff_input_; sort_chain >> sort_input_; } void SplitWorker::Run(const util::stream::ChainPosition &position) { // input: ngram record (id, prob, and backoff) // output: a float to the backoff_input stream // an ngram id and a float to the sort_input stream for (util::stream::Stream stream(position); stream; ++stream) { NGram<ProbBackoff> ngram(stream.Get(), order_); // write id and prob to the sort stream float prob = ngram.Value().prob; lm::WordIndex *out = reinterpret_cast<lm::WordIndex *>(sort_input_.Get()); for (const lm::WordIndex *it = ngram.begin(); it != ngram.end(); ++it) { *out++ = *it; } *reinterpret_cast<float *>(out) = prob; ++sort_input_; // write backoff to the backoff output stream float boff = ngram.Value().backoff; *reinterpret_cast<float *>(backoff_input_.Get()) = boff; ++backoff_input_; } sort_input_.Poison(); backoff_input_.Poison(); } } }
0
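Each record `Run()` pushes onto the sort stream is `order_` word ids followed by one float, matching the writes above. A small sketch of that layout arithmetic; it assumes kenlm's `lm::WordIndex` is a 32-bit id, which holds on mainstream platforms:

// Layout of one (n-gram ids, prob) record on the sort stream.
#include <cstddef>
#include <cstdint>

using WordIndex = uint32_t;  // stand-in for lm::WordIndex

// Size in bytes of one record for a given n-gram order.
constexpr std::size_t SortRecordSize(std::size_t order) {
  return order * sizeof(WordIndex) + sizeof(float);
}

static_assert(SortRecordSize(3) == 16, "trigram record: 3 ids + 1 float");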
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/ctcdecode/setup.cfg
# temp_build is two deep because SWIG does not clean relative paths when # building, so ../kenlm pollutes the source directory. [build_ext] build-lib=temp_build/temp_build build-temp=temp_build/temp_build [build_py] build-lib=temp_build/temp_build [bdist_wheel] bdist-dir=temp_build/temp_build [install_lib] build-dir=temp_build/temp_build
0
coqui_public_repos/snakepit/src
coqui_public_repos/snakepit/src/utils/dateTime.js
var exports = module.exports = {} exports.getDuration = function (date1, date2) { let delta = Math.abs(date2 - date1) / 1000 let days = Math.floor(delta / 86400) delta -= days * 86400 let hours = Math.floor(delta / 3600) % 24 delta -= hours * 3600 let minutes = Math.floor(delta / 60) % 60 delta -= minutes * 60 let seconds = Math.floor(delta % 60) return { days: days, hours: hours, minutes: minutes, seconds: seconds } }
0
coqui_public_repos/snakepit
coqui_public_repos/snakepit/scripts/prepare.sh
set -o pipefail ( echo "Preparation started..." set -ex set -o pipefail mkdir "$JOB_DIR/tmp" if [ -n "$CONTINUE_JOB_NUMBER" ]; then cp -r "$DATA_ROOT/pits/$CONTINUE_JOB_NUMBER/keep" "$JOB_DIR/keep" else mkdir "$JOB_DIR/keep" fi job_src_dir="$JOB_DIR/src" archive="$JOB_DIR/archive.tar.gz" if [ -f "$JOB_DIR/origin" ]; then echo "Git based" origin=$(<"$JOB_DIR/origin") git clone $origin "$job_src_dir" cd "$job_src_dir" if [ -f "$JOB_DIR/hash" ]; then hash=$(<"$JOB_DIR/hash") git reset --hard $hash fi git submodule update --recursive git lfs fetch git lfs checkout elif [ -f "$archive" ]; then echo "Archive based" mkdir "$job_src_dir" tar -xf "$archive" -C "$job_src_dir" else mkdir "$job_src_dir" fi cd "$job_src_dir" patch_file="$JOB_DIR/git.patch" if [ -f "$patch_file" ]; then cat "$patch_file" | patch -p0 fi echo "Preparation done." ) 2>&1 | ts '[%Y-%m-%d %H:%M:%S] [prepare]' >>"$JOB_DIR/pit.log"
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/pyenv-build.sh
#!/bin/bash set -xe source $(dirname "$0")/tc-tests-utils.sh unset PYTHON_BIN_PATH unset PYTHONPATH export PATH="${PYENV_ROOT}/bin:$PATH" install_pyenv "${PYENV_ROOT}" install_pyenv_virtualenv "$(pyenv root)/plugins/pyenv-virtualenv" for pyver_conf in ${SUPPORTED_PYTHON_VERSIONS}; do pyver=$(echo "${pyver_conf}" | cut -d':' -f1) pyconf=$(echo "${pyver_conf}" | cut -d':' -f2) pyalias="${pyver}_${pyconf}" PYTHON_CONFIGURE_OPTS="--enable-unicode=${pyconf}" pyenv_install ${pyver} ${pyalias} setup_pyenv_virtualenv "${pyalias}" "deepspeech" virtualenv_activate "${pyalias}" "deepspeech" python --version python3 --version || true # Might fail without any issue on Windows which pip which pip3 || true # Might fail without any issue on Windows virtualenv_deactivate "${pyalias}" "deepspeech" done;
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/extensions/special/rho-fst.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_EXTENSIONS_SPECIAL_RHO_FST_H_ #define FST_EXTENSIONS_SPECIAL_RHO_FST_H_ #include <memory> #include <string> #include <fst/const-fst.h> #include <fst/matcher-fst.h> #include <fst/matcher.h> DECLARE_int64_t(rho_fst_rho_label); DECLARE_string(rho_fst_rewrite_mode); namespace fst { namespace internal { template <class Label> class RhoFstMatcherData { public: explicit RhoFstMatcherData( Label rho_label = FLAGS_rho_fst_rho_label, MatcherRewriteMode rewrite_mode = RewriteMode(FLAGS_rho_fst_rewrite_mode)) : rho_label_(rho_label), rewrite_mode_(rewrite_mode) {} RhoFstMatcherData(const RhoFstMatcherData &data) : rho_label_(data.rho_label_), rewrite_mode_(data.rewrite_mode_) {} static RhoFstMatcherData<Label> *Read(std::istream &istrm, const FstReadOptions &read) { auto *data = new RhoFstMatcherData<Label>(); ReadType(istrm, &data->rho_label_); int32_t rewrite_mode; ReadType(istrm, &rewrite_mode); data->rewrite_mode_ = static_cast<MatcherRewriteMode>(rewrite_mode); return data; } bool Write(std::ostream &ostrm, const FstWriteOptions &opts) const { WriteType(ostrm, rho_label_); WriteType(ostrm, static_cast<int32_t>(rewrite_mode_)); return !ostrm ? false : true; } Label RhoLabel() const { return rho_label_; } MatcherRewriteMode RewriteMode() const { return rewrite_mode_; } private: static MatcherRewriteMode RewriteMode(const string &mode) { if (mode == "auto") return MATCHER_REWRITE_AUTO; if (mode == "always") return MATCHER_REWRITE_ALWAYS; if (mode == "never") return MATCHER_REWRITE_NEVER; LOG(WARNING) << "RhoFst: Unknown rewrite mode: " << mode << ". " << "Defaulting to auto."; return MATCHER_REWRITE_AUTO; } Label rho_label_; MatcherRewriteMode rewrite_mode_; }; } // namespace internal constexpr uint8_t kRhoFstMatchInput = 0x01; // Input matcher is RhoMatcher. constexpr uint8_t kRhoFstMatchOutput = 0x02; // Output matcher is RhoMatcher. template <class M, uint8_t flags = kRhoFstMatchInput | kRhoFstMatchOutput> class RhoFstMatcher : public RhoMatcher<M> { public: using FST = typename M::FST; using Arc = typename M::Arc; using StateId = typename Arc::StateId; using Label = typename Arc::Label; using Weight = typename Arc::Weight; using MatcherData = internal::RhoFstMatcherData<Label>; enum : uint8_t { kFlags = flags }; // This makes a copy of the FST. RhoFstMatcher( const FST &fst, MatchType match_type, std::shared_ptr<MatcherData> data = std::make_shared<MatcherData>()) : RhoMatcher<M>(fst, match_type, RhoLabel(match_type, data ? data->RhoLabel() : MatcherData().RhoLabel()), data ? data->RewriteMode() : MatcherData().RewriteMode()), data_(data) {} // This doesn't copy the FST. RhoFstMatcher( const FST *fst, MatchType match_type, std::shared_ptr<MatcherData> data = std::make_shared<MatcherData>()) : RhoMatcher<M>(fst, match_type, RhoLabel(match_type, data ? data->RhoLabel() : MatcherData().RhoLabel()), data ? data->RewriteMode() : MatcherData().RewriteMode()), data_(data) {} // This makes a copy of the FST. 
RhoFstMatcher(const RhoFstMatcher<M, flags> &matcher, bool safe = false) : RhoMatcher<M>(matcher, safe), data_(matcher.data_) {} RhoFstMatcher<M, flags> *Copy(bool safe = false) const override { return new RhoFstMatcher<M, flags>(*this, safe); } const MatcherData *GetData() const { return data_.get(); } std::shared_ptr<MatcherData> GetSharedData() const { return data_; } private: static Label RhoLabel(MatchType match_type, Label label) { if (match_type == MATCH_INPUT && flags & kRhoFstMatchInput) return label; if (match_type == MATCH_OUTPUT && flags & kRhoFstMatchOutput) return label; return kNoLabel; } std::shared_ptr<MatcherData> data_; }; extern const char rho_fst_type[]; extern const char input_rho_fst_type[]; extern const char output_rho_fst_type[]; using StdRhoFst = MatcherFst<ConstFst<StdArc>, RhoFstMatcher<SortedMatcher<ConstFst<StdArc>>>, rho_fst_type>; using LogRhoFst = MatcherFst<ConstFst<LogArc>, RhoFstMatcher<SortedMatcher<ConstFst<LogArc>>>, rho_fst_type>; using Log64RhoFst = MatcherFst<ConstFst<Log64Arc>, RhoFstMatcher<SortedMatcher<ConstFst<Log64Arc>>>, input_rho_fst_type>; using StdInputRhoFst = MatcherFst<ConstFst<StdArc>, RhoFstMatcher<SortedMatcher<ConstFst<StdArc>>, kRhoFstMatchInput>, input_rho_fst_type>; using LogInputRhoFst = MatcherFst<ConstFst<LogArc>, RhoFstMatcher<SortedMatcher<ConstFst<LogArc>>, kRhoFstMatchInput>, input_rho_fst_type>; using Log64InputRhoFst = MatcherFst< ConstFst<Log64Arc>, RhoFstMatcher<SortedMatcher<ConstFst<Log64Arc>>, kRhoFstMatchInput>, input_rho_fst_type>; using StdOutputRhoFst = MatcherFst<ConstFst<StdArc>, RhoFstMatcher<SortedMatcher<ConstFst<StdArc>>, kRhoFstMatchOutput>, output_rho_fst_type>; using LogOutputRhoFst = MatcherFst<ConstFst<LogArc>, RhoFstMatcher<SortedMatcher<ConstFst<LogArc>>, kRhoFstMatchOutput>, output_rho_fst_type>; using Log64OutputRhoFst = MatcherFst< ConstFst<Log64Arc>, RhoFstMatcher<SortedMatcher<ConstFst<Log64Arc>>, kRhoFstMatchOutput>, output_rho_fst_type>; } // namespace fst #endif // FST_EXTENSIONS_SPECIAL_RHO_FST_H_
0
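`RhoFstMatcher` specializes `RhoMatcher`, whose typical use is in composition: the rho label on one machine matches any label with no explicit arc. A sketch of that pattern using plain `RhoMatcher`; the rho label 42 and the FST contents are illustrative, mirroring what the rho_fst_rho_label flag configures, and the `MatcherFst` aliases above bake the same behavior into a loadable FST type:

// Rho-matching in composition: arcs labeled 42 in g act as "rest" arcs.
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst s, g;
  // ... populate s (the input) and g (a grammar whose arcs labeled 42 should
  // match any symbol not matched explicitly), then ilabel-sort g:
  fst::ArcSort(&g, fst::ILabelCompare<fst::StdArc>());

  using RM = fst::RhoMatcher<fst::SortedMatcher<fst::Fst<fst::StdArc>>>;
  fst::ComposeFstOptions<fst::StdArc, RM> opts;
  opts.gc_limit = 0;
  opts.matcher1 = new RM(s, fst::MATCH_NONE);       // no rho matching on s
  opts.matcher2 = new RM(g, fst::MATCH_INPUT, 42);  // rho on g's input side
  fst::ComposeFst<fst::StdArc> result(s, g, opts);  // owns both matchers
  return 0;
}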
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/configure.ac
AC_INIT([OpenFst], [1.6.9], [help@www.openfst.org]) AM_INIT_AUTOMAKE([foreign nostdinc -Wall -Werror subdir-objects]) AM_PROG_AR # OpenFst does not throw exceptions, so we do not generate exception handling # code. However, users are free to re-enable exception handling. # OpenFst assumes char is unsigned; -fsigned-char is likely unsafe. CPPFLAGS="$CPPFLAGS -fno-exceptions -funsigned-char" CXXFLAGS="$CXXFLAGS -std=c++11" AC_PROG_CXX AC_DISABLE_STATIC AC_PROG_LIBTOOL AC_CONFIG_HEADERS([config.h src/include/fst/config.h]) AC_CONFIG_SRCDIR([src/lib/fst.cc]) AC_CONFIG_FILES([ Makefile src/Makefile src/include/Makefile src/lib/Makefile src/bin/Makefile src/test/Makefile src/extensions/Makefile src/extensions/compact/Makefile src/extensions/compress/Makefile src/extensions/const/Makefile src/extensions/far/Makefile src/extensions/linear/Makefile src/extensions/lookahead/Makefile src/extensions/mpdt/Makefile src/extensions/ngram/Makefile src/extensions/pdt/Makefile src/extensions/python/Makefile src/extensions/special/Makefile src/script/Makefile ]) AC_CONFIG_MACRO_DIR([m4]) AC_LANG([C++]) AC_ARG_ENABLE([compact-fsts], [AS_HELP_STRING([--enable-compact-fsts], [enable CompactFst extensions])], [], [enable_compact_fsts=no]) AM_CONDITIONAL([HAVE_COMPACT], [test "x$enable_compact_fsts" != xno]) AC_ARG_ENABLE([compress], [AS_HELP_STRING([--enable-compress], [enable compression extension])], [], [enable_compress=no]) AM_CONDITIONAL([HAVE_COMPRESS], [test "x$enable_compress" != xno]) AC_ARG_ENABLE([const-fsts], [AS_HELP_STRING([--enable-const-fsts], [enable ConstFst extensions])], [], [enable_const_fsts=no]) AM_CONDITIONAL([HAVE_CONST], [test "x$enable_const_fsts" != xno]) AC_ARG_ENABLE([far], [AS_HELP_STRING([--enable-far], [enable FAR extensions])], [], [enable_far=no]) AM_CONDITIONAL([HAVE_FAR], [test "x$enable_far" != xno]) AC_ARG_ENABLE([linear-fsts], [AS_HELP_STRING([--enable-linear-fsts], [enable LinearTagger/ClassifierFst extensions])], [], [enable_linear_fsts=no]) AM_CONDITIONAL([HAVE_LINEAR], [test "x$enable_linear_fsts" != xno]) AC_ARG_ENABLE([lookahead-fsts], [AS_HELP_STRING([--enable-lookahead-fsts], [enable LookAheadFst extensions])], [], [enable_lookahead_fsts=no]) AM_CONDITIONAL([HAVE_LOOKAHEAD], [test "x$enable_lookahead_fsts" != xno]) AC_ARG_ENABLE([mpdt], [AS_HELP_STRING([--enable-mpdt], [enable MPDT extensions])], [], [enable_mpdt=no]) AM_CONDITIONAL([HAVE_MPDT], [test "x$enable_mpdt" != xno]) AC_ARG_ENABLE([ngram-fsts], [AS_HELP_STRING([--enable-ngram-fsts], [enable NGramFst extension])], [], [enable_ngram_fsts=no]) AM_CONDITIONAL([HAVE_NGRAM], [test "x$enable_ngram_fsts" != xno]) AC_ARG_ENABLE([pdt], [AS_HELP_STRING([--enable-pdt], [enable PDT extensions])], [], [enable_pdt=no]) AM_CONDITIONAL([HAVE_PDT], [test "x$enable_pdt" != xno]) AC_ARG_ENABLE([python], [AS_HELP_STRING([--enable-python], [enable Python extensions])], [], [enable_python=no]) AM_CONDITIONAL([HAVE_PYTHON], [test "x$enable_python" != xno]) if test "x$enable_python" != xno; then AM_PATH_PYTHON(2.7) AC_PYTHON_DEVEL([>= '2.7']) fi AC_ARG_ENABLE([special], [AS_HELP_STRING([--enable-special], [enable special-matcher extensions])], [], [enable_special=no]) AM_CONDITIONAL([HAVE_SPECIAL], [test "x$enable_special" != xno]) # --enable-bin enables script and bin "extensions". 
AC_ARG_ENABLE([bin], [AS_HELP_STRING([--enable-bin], [enable fst::script and command-line binaries])], [], [enable_bin=yes]) AM_CONDITIONAL([HAVE_BIN], [test "x$enable_bin" != xno]) AM_CONDITIONAL([HAVE_SCRIPT], [test "x$enable_bin" != xno]) # --enable-grm enables dependencies of OpenGrm: far, mpdt, and pdt. AC_ARG_ENABLE([grm], [AS_HELP_STRING([--enable-grm], [enable all dependencies of OpenGrm])], [], [enable_grm=no]) AM_CONDITIONAL([HAVE_GRM], [test "x$enable_grm" != xno]) AC_ARG_WITH([libfstdir], [--with-libfstdir[=DIR] fst dynamic extensions [[LIBDIR/fst]]], [], [with_libfstdir=[${libdir}/fst]]) AC_SUBST([libfstdir], $with_libfstdir) AC_CHECK_LIB([dl], dlopen, [DL_LIBS=-ldl]) AC_SUBST([DL_LIBS]) AC_OUTPUT
0
coqui_public_repos/inference-engine/third_party
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/install-sh
#!/bin/sh # install - install a program, script, or datafile scriptversion=2014-09-12.12; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. tab=' ' nl=' ' IFS=" $tab$nl" # Set DOITPROG to "echo" to test this script. doit=${DOITPROG-} doit_exec=${doit:-exec} # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false is_target_a_directory=possibly usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. 
Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) is_target_a_directory=always dst_arg=$2 # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) is_target_a_directory=never;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done # We allow the use of options -d and -T together, by making -d # take the precedence; this is for compatibility with GNU install. if test -n "$dir_arg"; then if test -n "$dst_arg"; then echo "$0: target directory not allowed when installing a directory." >&2 exit 1 fi fi if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call 'install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then if test $# -gt 1 || test "$is_target_a_directory" = always; then if test ! -d "$dst_arg"; then echo "$0: $dst_arg: Is not a directory." >&2 exit 1 fi fi fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for 'test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. 
if test -d "$dst"; then if test "$is_target_a_directory" = never; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else dstdir=`dirname "$dst"` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) # $RANDOM is not portable (e.g. dash); use it when possible to # lower collision chance tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null; exit $ret' 0 # As "mkdir -p" follows symlinks and we work in /tmp possibly; so # create the $tmpdir first (and fail if unsuccessful) to make sure # that nobody tries to guess the $tmpdir name. if (umask $mkdir_umask && $mkdirprog $mkdir_mode "$tmpdir" && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. test_tmpdir="$tmpdir/a" ls_ld_tmpdir=`ls -ld "$test_tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac oIFS=$IFS IFS=/ set -f set fnord $dstdir shift set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. 
(umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End:
0
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/mpdt/reverse.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Reverses an MPDT.

#ifndef FST_EXTENSIONS_MPDT_REVERSE_H_
#define FST_EXTENSIONS_MPDT_REVERSE_H_

#include <limits>
#include <vector>

#include <fst/mutable-fst.h>
#include <fst/relabel.h>
#include <fst/reverse.h>

namespace fst {

// Reverses a multi-stack pushdown transducer (MPDT) encoded as an FST.
template <class Arc, class RevArc>
void Reverse(
    const Fst<Arc> &ifst,
    const std::vector<std::pair<typename Arc::Label, typename Arc::Label>>
        &parens,
    std::vector<typename Arc::Label> *assignments, MutableFst<RevArc> *ofst) {
  using Label = typename Arc::Label;
  // Reverses FST component.
  Reverse(ifst, ofst);
  // Exchanges open and close parenthesis pairs.
  std::vector<std::pair<Label, Label>> relabel_pairs;
  relabel_pairs.reserve(2 * parens.size());
  for (const auto &pair : parens) {
    relabel_pairs.emplace_back(pair.first, pair.second);
    relabel_pairs.emplace_back(pair.second, pair.first);
  }
  Relabel(ofst, relabel_pairs, relabel_pairs);
  // Computes new bounds for the stack assignments. Both bounds are updated
  // independently; chaining the tests with else-if would leave max_level
  // untouched whenever an assignment also lowers the minimum (e.g., for a
  // single assignment or a strictly decreasing sequence).
  Label max_level = -1;
  Label min_level = std::numeric_limits<Label>::max();
  for (const auto assignment : *assignments) {
    if (assignment < min_level) min_level = assignment;
    if (assignment > max_level) max_level = assignment;
  }
  // Actually reverses stack assignments.
  for (auto &assignment : *assignments) {
    assignment = (max_level - assignment) + min_level;
  }
}

}  // namespace fst

#endif  // FST_EXTENSIONS_MPDT_REVERSE_H_
0
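The parenthesis exchange above is a plain `Relabel` with each pair listed in both directions. That step in isolation, on a toy FST with one illustrative open/close pair:

// Swaps the labels 100 <-> 101 on both tapes, as the MPDT Reverse does for
// each parenthesis pair.
#include <utility>
#include <vector>
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst f;
  f.AddState(); f.AddState();
  f.SetStart(0);
  f.AddArc(0, fst::StdArc(100, 100, 0.0, 1));  // "open paren" label 100
  f.SetFinal(1, fst::StdArc::Weight::One());

  const std::vector<std::pair<fst::StdArc::Label, fst::StdArc::Label>>
      pairs = {{100, 101}, {101, 100}};
  fst::Relabel(&f, pairs, pairs);  // the arc is now labeled 101
  return 0;
}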
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include
coqui_public_repos/inference-engine/third_party/openfst-1.6.9-win/src/include/fst/invert.h
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Functions and classes to invert an FST.

#ifndef FST_INVERT_H_
#define FST_INVERT_H_

#include <fst/arc-map.h>
#include <fst/mutable-fst.h>

namespace fst {

// Mapper to implement inversion of an arc.
template <class A>
struct InvertMapper {
  using FromArc = A;
  using ToArc = A;

  InvertMapper() {}

  ToArc operator()(const FromArc &arc) const {
    return ToArc(arc.olabel, arc.ilabel, arc.weight, arc.nextstate);
  }

  constexpr MapFinalAction FinalAction() const { return MAP_NO_SUPERFINAL; }

  constexpr MapSymbolsAction InputSymbolsAction() const {
    return MAP_CLEAR_SYMBOLS;
  }

  constexpr MapSymbolsAction OutputSymbolsAction() const {
    return MAP_CLEAR_SYMBOLS;
  }

  uint64_t Properties(uint64_t props) const { return InvertProperties(props); }
};

// Inverts the transduction corresponding to an FST by exchanging the
// FST's input and output labels.
//
// Complexity:
//
//   Time: O(V + E)
//   Space: O(1)
//
// where V is the number of states and E is the number of arcs.
template <class Arc>
inline void Invert(const Fst<Arc> &ifst, MutableFst<Arc> *ofst) {
  std::unique_ptr<SymbolTable> input(
      ifst.InputSymbols() ? ifst.InputSymbols()->Copy() : nullptr);
  std::unique_ptr<SymbolTable> output(
      ifst.OutputSymbols() ? ifst.OutputSymbols()->Copy() : nullptr);
  ArcMap(ifst, ofst, InvertMapper<Arc>());
  ofst->SetInputSymbols(output.get());
  ofst->SetOutputSymbols(input.get());
}

// Destructive variant of the above.
template <class Arc>
inline void Invert(MutableFst<Arc> *fst) {
  std::unique_ptr<SymbolTable> input(
      fst->InputSymbols() ? fst->InputSymbols()->Copy() : nullptr);
  std::unique_ptr<SymbolTable> output(
      fst->OutputSymbols() ? fst->OutputSymbols()->Copy() : nullptr);
  ArcMap(fst, InvertMapper<Arc>());
  fst->SetInputSymbols(output.get());
  fst->SetOutputSymbols(input.get());
}

// Inverts the transduction corresponding to an FST by exchanging the
// FST's input and output labels. This version is a delayed FST.
//
// Complexity:
//
//   Time: O(v + e)
//   Space: O(1)
//
// where v is the number of states visited and e is the number of arcs visited.
// Constant time and space to visit an input state or arc is assumed and
// exclusive of caching.
template <class A>
class InvertFst : public ArcMapFst<A, A, InvertMapper<A>> {
 public:
  using Arc = A;

  using Mapper = InvertMapper<Arc>;
  using Impl = internal::ArcMapFstImpl<A, A, InvertMapper<A>>;

  explicit InvertFst(const Fst<Arc> &fst)
      : ArcMapFst<Arc, Arc, Mapper>(fst, Mapper()) {
    GetMutableImpl()->SetOutputSymbols(fst.InputSymbols());
    GetMutableImpl()->SetInputSymbols(fst.OutputSymbols());
  }

  // See Fst<>::Copy() for doc.
  InvertFst(const InvertFst<Arc> &fst, bool safe = false)
      : ArcMapFst<Arc, Arc, Mapper>(fst, safe) {}

  // Get a copy of this InvertFst. See Fst<>::Copy() for further doc.
  InvertFst<Arc> *Copy(bool safe = false) const override {
    return new InvertFst(*this, safe);
  }

 private:
  using ImplToFst<Impl>::GetMutableImpl;
};

// Specialization for InvertFst.
template <class Arc>
class StateIterator<InvertFst<Arc>>
    : public StateIterator<ArcMapFst<Arc, Arc, InvertMapper<Arc>>> {
 public:
  explicit StateIterator(const InvertFst<Arc> &fst)
      : StateIterator<ArcMapFst<Arc, Arc, InvertMapper<Arc>>>(fst) {}
};

// Specialization for InvertFst.
template <class Arc> class ArcIterator<InvertFst<Arc>> : public ArcIterator<ArcMapFst<Arc, Arc, InvertMapper<Arc>>> { public: using StateId = typename Arc::StateId; ArcIterator(const InvertFst<Arc> &fst, StateId s) : ArcIterator<ArcMapFst<Arc, Arc, InvertMapper<Arc>>>(fst, s) {} }; // Useful alias when using StdArc. using StdInvertFst = InvertFst<StdArc>; } // namespace fst #endif // FST_INVERT_H_
0
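A minimal sketch of both `Invert` variants defined above, on a one-arc toy transducer:

// Destructive and delayed inversion of a transducer.
#include <fst/fstlib.h>

int main() {
  fst::StdVectorFst t;
  t.AddState(); t.AddState();
  t.SetStart(0);
  t.AddArc(0, fst::StdArc(1, 2, 0.5, 1));  // maps input 1 to output 2
  t.SetFinal(1, fst::StdArc::Weight::One());

  // Destructive in-place inversion: t now maps 2 to 1.
  fst::Invert(&t);

  // Delayed variant: arcs are inverted lazily as states are visited.
  fst::InvertFst<fst::StdArc> delayed(t);
  return 0;
}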
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions
coqui_public_repos/inference-engine/third_party/openfst-1.6.7/src/include/fst/extensions/linear/linearscript.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. #ifndef FST_EXTENSIONS_LINEAR_LINEARSCRIPT_H_ #define FST_EXTENSIONS_LINEAR_LINEARSCRIPT_H_ #include <istream> #include <sstream> #include <string> #include <vector> #include <fst/compat.h> #include <fst/extensions/linear/linear-fst-data-builder.h> #include <fst/extensions/linear/linear-fst.h> #include <fstream> #include <fst/symbol-table.h> #include <fst/script/arg-packs.h> #include <fst/script/script-impl.h> DECLARE_string(delimiter); DECLARE_string(empty_symbol); DECLARE_string(start_symbol); DECLARE_string(end_symbol); DECLARE_bool(classifier); namespace fst { namespace script { typedef std::tuple<const string &, const string &, const string &, char **, int, const string &, const string &, const string &, const string &> LinearCompileArgs; bool ValidateDelimiter(); bool ValidateEmptySymbol(); // Returns the proper label given the symbol. For symbols other than // `FLAGS_start_symbol` or `FLAGS_end_symbol`, looks up the symbol // table to decide the label. Depending on whether // `FLAGS_start_symbol` and `FLAGS_end_symbol` are identical, it // either returns `kNoLabel` for later processing or decides the label // right away. template <class Arc> inline typename Arc::Label LookUp(const string &str, SymbolTable *syms) { if (str == FLAGS_start_symbol) return str == FLAGS_end_symbol ? kNoLabel : LinearFstData<Arc>::kStartOfSentence; else if (str == FLAGS_end_symbol) return LinearFstData<Arc>::kEndOfSentence; else return syms->AddSymbol(str); } // Splits `str` with `delim` as the delimiter and stores the labels in // `output`. template <class Arc> void SplitAndPush(const string &str, const char delim, SymbolTable *syms, std::vector<typename Arc::Label> *output) { if (str == FLAGS_empty_symbol) return; std::istringstream strm(str); string buf; while (std::getline(strm, buf, delim)) output->push_back(LookUp<Arc>(buf, syms)); } // Like `std::replace_copy` but returns the number of modifications template <class InputIterator, class OutputIterator, class T> size_t ReplaceCopy(InputIterator first, InputIterator last, OutputIterator result, const T &old_value, const T &new_value) { size_t changes = 0; while (first != last) { if (*first == old_value) { *result = new_value; ++changes; } else { *result = *first; } ++first; ++result; } return changes; } template <class Arc> bool GetVocabRecord(const string &vocab, std::istream &strm, // NOLINT SymbolTable *isyms, SymbolTable *fsyms, SymbolTable *osyms, typename Arc::Label *word, std::vector<typename Arc::Label> *feature_labels, std::vector<typename Arc::Label> *possible_labels, size_t *num_line); template <class Arc> bool GetModelRecord(const string &model, std::istream &strm, // NOLINT SymbolTable *fsyms, SymbolTable *osyms, std::vector<typename Arc::Label> *input_labels, std::vector<typename Arc::Label> *output_labels, typename Arc::Weight *weight, size_t *num_line); // Reads in vocabulary file. 
Each line is in the following format // // word <whitespace> features [ <whitespace> possible output ] // // where features and possible output are `FLAGS_delimiter`-delimited lists of // tokens template <class Arc> void AddVocab(const string &vocab, SymbolTable *isyms, SymbolTable *fsyms, SymbolTable *osyms, LinearFstDataBuilder<Arc> *builder) { std::ifstream in(vocab); if (!in) LOG(FATAL) << "Can't open file: " << vocab; size_t num_line = 0, num_added = 0; std::vector<string> fields; std::vector<typename Arc::Label> feature_labels, possible_labels; typename Arc::Label word; while (GetVocabRecord<Arc>(vocab, in, isyms, fsyms, osyms, &word, &feature_labels, &possible_labels, &num_line)) { if (word == kNoLabel) { LOG(WARNING) << "Ignored: boundary word: " << fields[0]; continue; } if (possible_labels.empty()) num_added += builder->AddWord(word, feature_labels); else num_added += builder->AddWord(word, feature_labels, possible_labels); } VLOG(1) << "Read " << num_added << " words in " << num_line << " lines from " << vocab; } template <class Arc> void AddVocab(const string &vocab, SymbolTable *isyms, SymbolTable *fsyms, SymbolTable *osyms, LinearClassifierFstDataBuilder<Arc> *builder) { std::ifstream in(vocab); if (!in) LOG(FATAL) << "Can't open file: " << vocab; size_t num_line = 0, num_added = 0; std::vector<string> fields; std::vector<typename Arc::Label> feature_labels, possible_labels; typename Arc::Label word; while (GetVocabRecord<Arc>(vocab, in, isyms, fsyms, osyms, &word, &feature_labels, &possible_labels, &num_line)) { if (!possible_labels.empty()) LOG(FATAL) << "Classifier vocabulary should not have possible output constraint"; if (word == kNoLabel) { LOG(WARNING) << "Ignored: boundary word: " << fields[0]; continue; } num_added += builder->AddWord(word, feature_labels); } VLOG(1) << "Read " << num_added << " words in " << num_line << " lines from " << vocab; } // Reads in model file. The first line is an integer designating the // size of future window in the input sequences. After this, each line // is in the following format // // input sequence <whitespace> output sequence <whitespace> weight // // input sequence is a `FLAGS_delimiter`-delimited sequence of feature // labels (see `AddVocab()`) . output sequence is a // `FLAGS_delimiter`-delimited sequence of output labels where the // last label is the output of the feature position before the history // boundary. 
template <class Arc> void AddModel(const string &model, SymbolTable *fsyms, SymbolTable *osyms, LinearFstDataBuilder<Arc> *builder) { std::ifstream in(model); if (!in) LOG(FATAL) << "Can't open file: " << model; string line; std::getline(in, line); if (!in) LOG(FATAL) << "Empty file: " << model; size_t future_size; { std::istringstream strm(line); strm >> future_size; if (!strm) LOG(FATAL) << "Can't read future size: " << model; } size_t num_line = 1, num_added = 0; const int group = builder->AddGroup(future_size); VLOG(1) << "Group " << group << ": from " << model << "; future size is " << future_size << "."; // Add the rest of lines as a single feature group std::vector<string> fields; std::vector<typename Arc::Label> input_labels, output_labels; typename Arc::Weight weight; while (GetModelRecord<Arc>(model, in, fsyms, osyms, &input_labels, &output_labels, &weight, &num_line)) { if (output_labels.empty()) LOG(FATAL) << "Empty output sequence in source " << model << ", line " << num_line; const typename Arc::Label marks[] = {LinearFstData<Arc>::kStartOfSentence, LinearFstData<Arc>::kEndOfSentence}; std::vector<typename Arc::Label> copy_input(input_labels.size()), copy_output(output_labels.size()); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { size_t num_input_changes = ReplaceCopy(input_labels.begin(), input_labels.end(), copy_input.begin(), kNoLabel, marks[i]); size_t num_output_changes = ReplaceCopy(output_labels.begin(), output_labels.end(), copy_output.begin(), kNoLabel, marks[j]); if ((num_input_changes > 0 || i == 0) && (num_output_changes > 0 || j == 0)) num_added += builder->AddWeight(group, copy_input, copy_output, weight); } } } VLOG(1) << "Group " << group << ": read " << num_added << " weight(s) in " << num_line << " lines."; } template <class Arc> void AddModel(const string &model, SymbolTable *fsyms, SymbolTable *osyms, LinearClassifierFstDataBuilder<Arc> *builder) { std::ifstream in(model); if (!in) LOG(FATAL) << "Can't open file: " << model; string line; std::getline(in, line); if (!in) LOG(FATAL) << "Empty file: " << model; size_t future_size; { std::istringstream strm(line); strm >> future_size; if (!strm) LOG(FATAL) << "Can't read future size: " << model; } if (future_size != 0) LOG(FATAL) << "Classifier model must have future size = 0; got " << future_size << " from " << model; size_t num_line = 1, num_added = 0; const int group = builder->AddGroup(); VLOG(1) << "Group " << group << ": from " << model << "; future size is " << future_size << "."; // Add the rest of lines as a single feature group std::vector<string> fields; std::vector<typename Arc::Label> input_labels, output_labels; typename Arc::Weight weight; while (GetModelRecord<Arc>(model, in, fsyms, osyms, &input_labels, &output_labels, &weight, &num_line)) { if (output_labels.size() != 1) LOG(FATAL) << "Output not a single label in source " << model << ", line " << num_line; const typename Arc::Label marks[] = {LinearFstData<Arc>::kStartOfSentence, LinearFstData<Arc>::kEndOfSentence}; typename Arc::Label pred = output_labels[0]; std::vector<typename Arc::Label> copy_input(input_labels.size()); for (int i = 0; i < 2; ++i) { size_t num_input_changes = ReplaceCopy(input_labels.begin(), input_labels.end(), copy_input.begin(), kNoLabel, marks[i]); if (num_input_changes > 0 || i == 0) num_added += builder->AddWeight(group, copy_input, pred, weight); } } VLOG(1) << "Group " << group << ": read " << num_added << " weight(s) in " << num_line << " lines."; } void SplitByWhitespace(const string &str, 
                       std::vector<string> *out);

int ScanNumClasses(char **models, int models_length);

template <class Arc>
void LinearCompileTpl(LinearCompileArgs *args) {
  const string &epsilon_symbol = std::get<0>(*args);
  const string &unknown_symbol = std::get<1>(*args);
  const string &vocab = std::get<2>(*args);
  char **models = std::get<3>(*args);
  const int models_length = std::get<4>(*args);
  const string &out = std::get<5>(*args);
  const string &save_isymbols = std::get<6>(*args);
  const string &save_fsymbols = std::get<7>(*args);
  const string &save_osymbols = std::get<8>(*args);

  SymbolTable isyms,  // input (e.g. word tokens)
      osyms,          // output (e.g. tags)
      fsyms;          // feature (e.g. word identity, suffix, etc.)
  isyms.AddSymbol(epsilon_symbol);
  osyms.AddSymbol(epsilon_symbol);
  fsyms.AddSymbol(epsilon_symbol);
  isyms.AddSymbol(unknown_symbol);

  VLOG(1) << "start-of-sentence label is "
          << LinearFstData<Arc>::kStartOfSentence;
  VLOG(1) << "end-of-sentence label is " << LinearFstData<Arc>::kEndOfSentence;

  if (FLAGS_classifier) {
    int num_classes = ScanNumClasses(models, models_length);
    LinearClassifierFstDataBuilder<Arc> builder(num_classes, &isyms, &fsyms,
                                                &osyms);

    AddVocab(vocab, &isyms, &fsyms, &osyms, &builder);
    for (int i = 0; i < models_length; ++i)
      AddModel(models[i], &fsyms, &osyms, &builder);

    LinearClassifierFst<Arc> fst(builder.Dump(), num_classes, &isyms, &osyms);
    fst.Write(out);
  } else {
    LinearFstDataBuilder<Arc> builder(&isyms, &fsyms, &osyms);

    AddVocab(vocab, &isyms, &fsyms, &osyms, &builder);
    for (int i = 0; i < models_length; ++i)
      AddModel(models[i], &fsyms, &osyms, &builder);

    LinearTaggerFst<Arc> fst(builder.Dump(), &isyms, &osyms);
    fst.Write(out);
  }

  if (!save_isymbols.empty()) isyms.WriteText(save_isymbols);
  if (!save_fsymbols.empty()) fsyms.WriteText(save_fsymbols);
  if (!save_osymbols.empty()) osyms.WriteText(save_osymbols);
}

void LinearCompile(const string &arc_type, const string &epsilon_symbol,
                   const string &unknown_symbol, const string &vocab,
                   char **models, int models_len, const string &out,
                   const string &save_isymbols, const string &save_fsymbols,
                   const string &save_osymbols);

template <class Arc>
bool GetVocabRecord(const string &vocab, std::istream &strm,  // NOLINT
                    SymbolTable *isyms, SymbolTable *fsyms, SymbolTable *osyms,
                    typename Arc::Label *word,
                    std::vector<typename Arc::Label> *feature_labels,
                    std::vector<typename Arc::Label> *possible_labels,
                    size_t *num_line) {
  string line;
  if (!std::getline(strm, line)) return false;
  ++(*num_line);

  std::vector<string> fields;
  SplitByWhitespace(line, &fields);
  if (fields.size() != 3)
    LOG(FATAL) << "Wrong number of fields in source " << vocab << ", line "
               << *num_line;

  feature_labels->clear();
  possible_labels->clear();

  *word = LookUp<Arc>(fields[0], isyms);

  const char delim = FLAGS_delimiter[0];
  SplitAndPush<Arc>(fields[1], delim, fsyms, feature_labels);
  SplitAndPush<Arc>(fields[2], delim, osyms, possible_labels);

  return true;
}

template <class Arc>
bool GetModelRecord(const string &model, std::istream &strm,  // NOLINT
                    SymbolTable *fsyms, SymbolTable *osyms,
                    std::vector<typename Arc::Label> *input_labels,
                    std::vector<typename Arc::Label> *output_labels,
                    typename Arc::Weight *weight, size_t *num_line) {
  string line;
  if (!std::getline(strm, line)) return false;
  ++(*num_line);

  std::vector<string> fields;
  SplitByWhitespace(line, &fields);
  if (fields.size() != 3)
    LOG(FATAL) << "Wrong number of fields in source " << model << ", line "
               << *num_line;

  input_labels->clear();
  output_labels->clear();

  const char delim = FLAGS_delimiter[0];
  SplitAndPush<Arc>(fields[0],
delim, fsyms, input_labels); SplitAndPush<Arc>(fields[1], delim, osyms, output_labels); *weight = StrToWeight<typename Arc::Weight>(fields[2], model, *num_line); GuessStartOrEnd<Arc>(input_labels, kNoLabel); GuessStartOrEnd<Arc>(output_labels, kNoLabel); return true; } } // namespace script } // namespace fst #define REGISTER_FST_LINEAR_OPERATIONS(Arc) \ REGISTER_FST_OPERATION(LinearCompileTpl, Arc, LinearCompileArgs); #endif // FST_EXTENSIONS_LINEAR_LINEARSCRIPT_H_
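// A minimal sketch of driving the script-level entry point declared above;
// the file names are placeholders, not part of the library:
//
//   char *models[] = {const_cast<char *>("model.txt")};
//   fst::script::LinearCompile("standard", "<epsilon>", "<unknown>",
//                              "vocab.txt", models, 1, "compiled.fst",
//                              "", "", "");
//
// Passing empty strings for the last three arguments skips saving the
// symbol tables; `FLAGS_classifier` chooses between the tagger and
// classifier code paths inside `LinearCompileTpl()`.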
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/extensions/mpdt/Makefile.in
# Makefile.in generated by automake 1.14.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @HAVE_BIN_TRUE@bin_PROGRAMS = mpdtcompose$(EXEEXT) mpdtexpand$(EXEEXT) \ @HAVE_BIN_TRUE@ mpdtinfo$(EXEEXT) mpdtreverse$(EXEEXT) subdir = src/extensions/mpdt DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h \ $(top_builddir)/src/include/fst/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo 
"$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_DEPENDENCIES = \ @HAVE_SCRIPT_TRUE@ ../../script/libfstscript.la \ @HAVE_SCRIPT_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__libfstmpdtscript_la_SOURCES_DIST = mpdtscript.cc @HAVE_SCRIPT_TRUE@am_libfstmpdtscript_la_OBJECTS = mpdtscript.lo libfstmpdtscript_la_OBJECTS = $(am_libfstmpdtscript_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libfstmpdtscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AM_CXXFLAGS) $(CXXFLAGS) $(libfstmpdtscript_la_LDFLAGS) \ $(LDFLAGS) -o $@ @HAVE_SCRIPT_TRUE@am_libfstmpdtscript_la_rpath = -rpath $(libdir) PROGRAMS = $(bin_PROGRAMS) am__mpdtcompose_SOURCES_DIST = mpdtcompose.cc @HAVE_BIN_TRUE@am_mpdtcompose_OBJECTS = mpdtcompose.$(OBJEXT) mpdtcompose_OBJECTS = $(am_mpdtcompose_OBJECTS) mpdtcompose_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtcompose_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__mpdtexpand_SOURCES_DIST = mpdtexpand.cc @HAVE_BIN_TRUE@am_mpdtexpand_OBJECTS = mpdtexpand.$(OBJEXT) mpdtexpand_OBJECTS = $(am_mpdtexpand_OBJECTS) mpdtexpand_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtexpand_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__mpdtinfo_SOURCES_DIST = mpdtinfo.cc @HAVE_BIN_TRUE@am_mpdtinfo_OBJECTS = mpdtinfo.$(OBJEXT) mpdtinfo_OBJECTS = $(am_mpdtinfo_OBJECTS) mpdtinfo_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtinfo_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) am__mpdtreverse_SOURCES_DIST = mpdtreverse.cc @HAVE_BIN_TRUE@am_mpdtreverse_OBJECTS = mpdtreverse.$(OBJEXT) mpdtreverse_OBJECTS = $(am_mpdtreverse_OBJECTS) mpdtreverse_LDADD = $(LDADD) @HAVE_BIN_TRUE@mpdtreverse_DEPENDENCIES = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la $(am__DEPENDENCIES_1) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = depcomp = $(SHELL) $(top_srcdir)/depcomp 
am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libfstmpdtscript_la_SOURCES) $(mpdtcompose_SOURCES) \ $(mpdtexpand_SOURCES) $(mpdtinfo_SOURCES) \ $(mpdtreverse_SOURCES) DIST_SOURCES = $(am__libfstmpdtscript_la_SOURCES_DIST) \ $(am__mpdtcompose_SOURCES_DIST) $(am__mpdtexpand_SOURCES_DIST) \ $(am__mpdtinfo_SOURCES_DIST) $(am__mpdtreverse_SOURCES_DIST) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DL_LIBS = @DL_LIBS@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PYTHON = @PYTHON@ PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@ PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_SITE_PKG = @PYTHON_SITE_PKG@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libfstdir = @libfstdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AM_CPPFLAGS = 
-I$(srcdir)/../../include $(ICU_CPPFLAGS) @HAVE_BIN_TRUE@LDADD = libfstmpdtscript.la \ @HAVE_BIN_TRUE@ ../pdt/libfstpdtscript.la \ @HAVE_BIN_TRUE@ ../../script/libfstscript.la \ @HAVE_BIN_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS) @HAVE_BIN_TRUE@mpdtcompose_SOURCES = mpdtcompose.cc @HAVE_BIN_TRUE@mpdtexpand_SOURCES = mpdtexpand.cc @HAVE_BIN_TRUE@mpdtinfo_SOURCES = mpdtinfo.cc @HAVE_BIN_TRUE@mpdtreverse_SOURCES = mpdtreverse.cc @HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstmpdtscript.la @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_SOURCES = mpdtscript.cc @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_LDFLAGS = -version-info 10:0:0 @HAVE_SCRIPT_TRUE@libfstmpdtscript_la_LIBADD = ../../script/libfstscript.la \ @HAVE_SCRIPT_TRUE@ ../../lib/libfst.la -lm $(DL_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cc .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/extensions/mpdt/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/extensions/mpdt/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libfstmpdtscript.la: $(libfstmpdtscript_la_OBJECTS) $(libfstmpdtscript_la_DEPENDENCIES) $(EXTRA_libfstmpdtscript_la_DEPENDENCIES) $(AM_V_CXXLD)$(libfstmpdtscript_la_LINK) $(am_libfstmpdtscript_la_rpath) $(libfstmpdtscript_la_OBJECTS) $(libfstmpdtscript_la_LIBADD) $(LIBS) install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) 
@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list mpdtcompose$(EXEEXT): $(mpdtcompose_OBJECTS) $(mpdtcompose_DEPENDENCIES) $(EXTRA_mpdtcompose_DEPENDENCIES) @rm -f mpdtcompose$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtcompose_OBJECTS) $(mpdtcompose_LDADD) $(LIBS) mpdtexpand$(EXEEXT): $(mpdtexpand_OBJECTS) $(mpdtexpand_DEPENDENCIES) $(EXTRA_mpdtexpand_DEPENDENCIES) @rm -f mpdtexpand$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtexpand_OBJECTS) $(mpdtexpand_LDADD) $(LIBS) mpdtinfo$(EXEEXT): $(mpdtinfo_OBJECTS) $(mpdtinfo_DEPENDENCIES) $(EXTRA_mpdtinfo_DEPENDENCIES) @rm -f mpdtinfo$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtinfo_OBJECTS) $(mpdtinfo_LDADD) $(LIBS) mpdtreverse$(EXEEXT): $(mpdtreverse_OBJECTS) $(mpdtreverse_DEPENDENCIES) $(EXTRA_mpdtreverse_DEPENDENCIES) @rm -f mpdtreverse$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(mpdtreverse_OBJECTS) $(mpdtreverse_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtcompose.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtexpand.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtinfo.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtreverse.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpdtscript.Plo@am__quote@ .cc.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c 
-o $@ $< .cc.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cc.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) install-binPROGRAMS: install-libLTLIBRARIES installdirs: for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \ clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-libLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \ clean-libtool cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-binPROGRAMS install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \ uninstall-libLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT:
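# Build sketch (hypothetical invocation; the exact flags depend on how the
# OpenFst tree is configured): this Makefile.in is turned into a Makefile by
# ./configure, and the mpdt tools above are only built when the extension is
# enabled, e.g.:
#
#   ./configure --enable-mpdt
#   make && make install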
0
coqui_public_repos/TTS/recipes
coqui_public_repos/TTS/recipes/vctk/download_vctk.sh
#!/usr/bin/env bash
# Take the script's own directory to prefix all the output paths.
RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
echo "$RUN_DIR"
# Download the VCTK dataset.
wget https://datashare.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip -O VCTK-Corpus-0.92.zip
# Extract it.
mkdir -p VCTK
unzip VCTK-Corpus-0.92.zip -d VCTK
# Move the extracted corpus into this recipe's directory and clean up.
mv VCTK "$RUN_DIR/"
rm VCTK-Corpus-0.92.zip
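# Usage sketch (hypothetical invocation): the working directory only affects
# where the temporary zip lands; the extracted corpus always ends up next to
# this script.
#   bash recipes/vctk/download_vctk.sh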
0
coqui_public_repos/STT/native_client/dotnet/STTWPF
coqui_public_repos/STT/native_client/dotnet/STTWPF/ViewModels/BindableBase.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Runtime.CompilerServices;

namespace STT.WPF.ViewModels
{
    /// <summary>
    /// Implementation of <see cref="INotifyPropertyChanged"/> to simplify models.
    /// </summary>
    public abstract class BindableBase : INotifyPropertyChanged
    {
        /// <summary>
        /// Checks if a property already matches a desired value. Sets the property and
        /// notifies listeners only when necessary.
        /// </summary>
        /// <typeparam name="T">Type of the property.</typeparam>
        /// <param name="backingStore">Reference to a property with both getter and setter.</param>
        /// <param name="value">Desired value for the property.</param>
        /// <param name="propertyName">Name of the property used to notify listeners. This
        /// value is optional and can be provided automatically when invoked from compilers that
        /// support CallerMemberName.</param>
        /// <param name="onChanged">Optional callback invoked after the value has changed.</param>
        /// <returns>True if the value was changed, false if the existing value matched the
        /// desired value.</returns>
        protected bool SetProperty<T>(ref T backingStore, T value,
            [CallerMemberName] string propertyName = "",
            Action onChanged = null)
        {
            if (EqualityComparer<T>.Default.Equals(backingStore, value))
                return false;

            backingStore = value;
            onChanged?.Invoke();
            OnPropertyChanged(propertyName);
            return true;
        }

        #region INotifyPropertyChanged
        /// <summary>
        /// Occurs when a property value changes.
        /// </summary>
        public event PropertyChangedEventHandler PropertyChanged;

        /// <summary>
        /// Notifies listeners that a property value has changed.
        /// </summary>
        /// <param name="propertyName">Name of the property used to notify listeners. This
        /// value is optional and can be provided automatically when invoked from compilers
        /// that support <see cref="CallerMemberNameAttribute"/>.</param>
        protected void OnPropertyChanged([CallerMemberName] string propertyName = "") =>
            PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(propertyName));
        #endregion
    }
}
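// A minimal sketch (hypothetical, not part of the STT client) of a view model
// built on BindableBase; `ExampleViewModel` and `Transcription` are
// illustrative names only.
namespace STT.WPF.ViewModels
{
    public class ExampleViewModel : BindableBase
    {
        private string _transcription;

        // SetProperty raises PropertyChanged only when the value actually changes,
        // so bindings refresh without redundant notifications.
        public string Transcription
        {
            get => _transcription;
            set => SetProperty(ref _transcription, value);
        }
    }
}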
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.9-win/src/script/Makefile.in
# Makefile.in generated by automake 1.15.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2017 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/script ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_python_devel.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h \ $(top_builddir)/src/include/fst/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e 
"s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(libdir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = @HAVE_SCRIPT_TRUE@libfstscript_la_DEPENDENCIES = ../lib/libfst.la \ @HAVE_SCRIPT_TRUE@ $(am__DEPENDENCIES_1) am__libfstscript_la_SOURCES_DIST = arciterator-class.cc arcsort.cc \ closure.cc compile.cc compose.cc concat.cc connect.cc \ convert.cc decode.cc determinize.cc difference.cc \ disambiguate.cc draw.cc encode.cc encodemapper-class.cc \ epsnormalize.cc equal.cc equivalent.cc fst-class.cc getters.cc \ info-impl.cc info.cc intersect.cc invert.cc isomorphic.cc \ map.cc minimize.cc print.cc project.cc prune.cc push.cc \ randequivalent.cc randgen.cc relabel.cc replace.cc reverse.cc \ reweight.cc rmepsilon.cc shortest-distance.cc shortest-path.cc \ stateiterator-class.cc synchronize.cc text-io.cc topsort.cc \ union.cc weight-class.cc verify.cc @HAVE_SCRIPT_TRUE@am_libfstscript_la_OBJECTS = arciterator-class.lo \ @HAVE_SCRIPT_TRUE@ arcsort.lo closure.lo compile.lo compose.lo \ @HAVE_SCRIPT_TRUE@ concat.lo connect.lo convert.lo decode.lo \ @HAVE_SCRIPT_TRUE@ determinize.lo difference.lo disambiguate.lo \ @HAVE_SCRIPT_TRUE@ draw.lo encode.lo encodemapper-class.lo \ @HAVE_SCRIPT_TRUE@ epsnormalize.lo equal.lo equivalent.lo \ @HAVE_SCRIPT_TRUE@ fst-class.lo getters.lo info-impl.lo info.lo \ @HAVE_SCRIPT_TRUE@ intersect.lo invert.lo isomorphic.lo map.lo \ @HAVE_SCRIPT_TRUE@ minimize.lo print.lo project.lo prune.lo \ @HAVE_SCRIPT_TRUE@ push.lo randequivalent.lo randgen.lo \ @HAVE_SCRIPT_TRUE@ relabel.lo replace.lo reverse.lo reweight.lo \ @HAVE_SCRIPT_TRUE@ rmepsilon.lo shortest-distance.lo \ @HAVE_SCRIPT_TRUE@ shortest-path.lo stateiterator-class.lo \ @HAVE_SCRIPT_TRUE@ synchronize.lo text-io.lo topsort.lo \ @HAVE_SCRIPT_TRUE@ union.lo weight-class.lo verify.lo libfstscript_la_OBJECTS = $(am_libfstscript_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libfstscript_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AM_CXXFLAGS) $(CXXFLAGS) $(libfstscript_la_LDFLAGS) \ $(LDFLAGS) -o $@ @HAVE_SCRIPT_TRUE@am_libfstscript_la_rpath = -rpath $(libdir) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) 
$(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libfstscript_la_SOURCES) DIST_SOURCES = $(am__libfstscript_la_SOURCES_DIST) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DL_LIBS = @DL_LIBS@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PYTHON = @PYTHON@ PYTHON_CPPFLAGS = @PYTHON_CPPFLAGS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_EXTRA_LDFLAGS = @PYTHON_EXTRA_LDFLAGS@ PYTHON_EXTRA_LIBS = @PYTHON_EXTRA_LIBS@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_SITE_PKG = @PYTHON_SITE_PKG@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = 
@abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libfstdir = @libfstdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AM_CPPFLAGS = -I$(srcdir)/../include $(ICU_CPPFLAGS) @HAVE_SCRIPT_TRUE@lib_LTLIBRARIES = libfstscript.la @HAVE_SCRIPT_TRUE@libfstscript_la_SOURCES = arciterator-class.cc arcsort.cc closure.cc \ @HAVE_SCRIPT_TRUE@compile.cc compose.cc concat.cc connect.cc convert.cc decode.cc \ @HAVE_SCRIPT_TRUE@determinize.cc difference.cc disambiguate.cc draw.cc encode.cc \ @HAVE_SCRIPT_TRUE@encodemapper-class.cc epsnormalize.cc equal.cc equivalent.cc fst-class.cc \ @HAVE_SCRIPT_TRUE@getters.cc info-impl.cc info.cc intersect.cc invert.cc isomorphic.cc map.cc \ @HAVE_SCRIPT_TRUE@minimize.cc print.cc project.cc prune.cc push.cc randequivalent.cc \ @HAVE_SCRIPT_TRUE@randgen.cc relabel.cc replace.cc reverse.cc reweight.cc rmepsilon.cc \ @HAVE_SCRIPT_TRUE@shortest-distance.cc shortest-path.cc stateiterator-class.cc synchronize.cc \ @HAVE_SCRIPT_TRUE@text-io.cc topsort.cc union.cc weight-class.cc verify.cc @HAVE_SCRIPT_TRUE@libfstscript_la_LIBADD = ../lib/libfst.la -lm $(DL_LIBS) @HAVE_SCRIPT_TRUE@libfstscript_la_LDFLAGS = -version-info 13:0:0 all: all-am .SUFFIXES: .SUFFIXES: .cc .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/script/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/script/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libfstscript.la: $(libfstscript_la_OBJECTS) $(libfstscript_la_DEPENDENCIES) $(EXTRA_libfstscript_la_DEPENDENCIES) $(AM_V_CXXLD)$(libfstscript_la_LINK) $(am_libfstscript_la_rpath) $(libfstscript_la_OBJECTS) $(libfstscript_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arciterator-class.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsort.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/closure.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compile.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compose.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/concat.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/connect.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/convert.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/decode.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/determinize.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/difference.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/disambiguate.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/draw.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/encode.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/encodemapper-class.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/epsnormalize.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/equal.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/equivalent.Plo@am__quote@ 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fst-class.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getters.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info-impl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/intersect.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/invert.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/isomorphic.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/map.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/minimize.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/project.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/prune.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/push.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/randequivalent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/randgen.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/relabel.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/replace.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reverse.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reweight.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rmepsilon.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/shortest-distance.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/shortest-path.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stateiterator-class.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/synchronize.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/text-io.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topsort.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/union.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/verify.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/weight-class.Plo@am__quote@ .cc.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cc.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cc.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(libdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-libLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT:
0
coqui_public_repos/STT-models/slovenian/itml
coqui_public_repos/STT-models/slovenian/itml/v0.1.1/MODEL_CARD.md
# Model card for Slovenian STT

Jump to section:

- [Model details](#model-details)
- [Intended use](#intended-use)
- [Performance Factors](#performance-factors)
- [Metrics](#metrics)
- [Training data](#training-data)
- [Evaluation data](#evaluation-data)
- [Ethical considerations](#ethical-considerations)
- [Caveats and recommendations](#caveats-and-recommendations)

## Model details

- Person or organization developing model: Originally trained by [Francis Tyers](https://scholar.google.fr/citations?user=o5HSM6cAAAAJ) and the [Inclusive Technology for Marginalised Languages](https://itml.cl.indiana.edu/) group.
- Model language: Slovenian / Slovenščina / `sl`
- Model date: April 26, 2021
- Model type: `Speech-to-Text`
- Model version: `v0.1.1`
- Compatible with 🐸 STT version: `v0.9.3`
- License: AGPL
- Citation details: `@techreport{slovenian-stt, author = {Tyers, Francis}, title = {Slovenian STT 0.1}, institution = {Coqui}, address = {\url{https://github.com/coqui-ai/STT-models}}, year = {2021}, month = {April}, number = {STT-CV6.1-SL-0.1} }`
- Where to send questions or comments about the model: You can leave an issue on [`STT-model` issues](https://github.com/coqui-ai/STT-models/issues), open a new discussion on [`STT-model` discussions](https://github.com/coqui-ai/STT-models/discussions), or chat with us on [Gitter](https://gitter.im/coqui-ai/).

## Intended use

Speech-to-Text for the [Slovenian Language](https://en.wikipedia.org/wiki/Slovenian_language) on 16kHz, mono-channel audio.

## Performance Factors

Factors relevant to Speech-to-Text performance include but are not limited to speaker demographics, recording quality, and background noise. Read more about STT performance factors [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

## Metrics

STT models are usually evaluated in terms of their transcription accuracy, deployment Real-Time Factor, and model size on disk.

#### Transcription Accuracy

The following Word Error Rates and Character Error Rates are reported on [omnilingo](https://tepozcatl.omnilingo.cc/sl/).

|Test Corpus|WER|CER|
|-----------|---|---|
|Common Voice|82.4\%|26.8\%|

#### Real-Time Factor

Real-Time Factor (RTF) is defined as `processing-time / length-of-audio`. The exact real-time factor of an STT model will depend on the hardware setup, so you may experience a different RTF. (A minimal measurement sketch follows at the end of this card.)

Recorded average RTF on laptop CPU: ``

#### Model Size

`model.pbmm`: 181M
`model.tflite`: 46M

### Approaches to uncertainty and variability

Confidence scores and multiple paths from the decoding beam can be used to measure model uncertainty and provide multiple, variable transcripts for any processed audio.

## Training data

This model was trained on the Common Voice 6.1 train set.

## Evaluation data

The model was evaluated on the Common Voice 6.1 test set.

## Ethical considerations

Deploying a Speech-to-Text model into any production setting has ethical implications. You should consider these implications before use.

### Demographic Bias

You should assume every machine learning model has demographic bias unless proven otherwise. For STT models, it is often the case that transcription accuracy is better for men than it is for women. If you are using this model in production, you should acknowledge this as a potential issue.

### Surveillance

Speech-to-Text may be misused to invade the privacy of others by recording and mining information from private conversations. This kind of individual privacy is protected by law in many countries.
You should not assume consent to record and analyze private speech.

## Caveats and recommendations

Machine learning models (like this STT model) perform best on data that is similar to the data on which they were trained. Read about what to expect from an STT model with regard to your data [here](https://stt.readthedocs.io/en/latest/DEPLOYMENT.html#how-will-a-model-perform-on-my-data).

In most applications, it is recommended that you [train your own language model](https://stt.readthedocs.io/en/latest/LANGUAGE_MODEL.html) to improve transcription accuracy on your speech data.
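As an aside on the RTF definition above, the following is a minimal measurement sketch, assuming a generic `transcribe` callable and a WAV input; neither name is part of the Coqui STT API described in this card.

```python
import time
import wave

def measure_rtf(transcribe, wav_path):
    """Measure RTF = processing-time / length-of-audio for one file.

    `transcribe` is a placeholder for any STT inference call; an RTF
    below 1.0 means faster-than-real-time transcription.
    """
    with wave.open(wav_path, "rb") as w:
        audio_seconds = w.getnframes() / float(w.getframerate())
    start = time.perf_counter()
    transcribe(wav_path)
    processing_seconds = time.perf_counter() - start
    return processing_seconds / audio_seconds
```

In practice one would average this over many utterances, since short files make the per-call overhead dominate the ratio.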
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text/decoder/LexiconFreeDecoder.cpp
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT-style license found in the * LICENSE file in the root directory of this source tree. */ #include <stdlib.h> #include <algorithm> #include <cmath> #include <functional> #include <numeric> #include "flashlight/lib/text/decoder/LexiconFreeDecoder.h" namespace fl { namespace lib { namespace text { void LexiconFreeDecoder::decodeBegin() { hyp_.clear(); hyp_.emplace(0, std::vector<LexiconFreeDecoderState>()); /* note: the lm reset itself with :start() */ hyp_[0].emplace_back(0.0, lm_->start(0), nullptr, sil_); nDecodedFrames_ = 0; nPrunedFrames_ = 0; } void LexiconFreeDecoder::decodeStep(const float* emissions, int T, int N) { int startFrame = nDecodedFrames_ - nPrunedFrames_; // Extend hyp_ buffer if (hyp_.size() < startFrame + T + 2) { for (int i = hyp_.size(); i < startFrame + T + 2; i++) { hyp_.emplace(i, std::vector<LexiconFreeDecoderState>()); } } std::vector<size_t> idx(N); // Looping over all the frames for (int t = 0; t < T; t++) { std::iota(idx.begin(), idx.end(), 0); if (N > opt_.beamSizeToken) { std::partial_sort( idx.begin(), idx.begin() + opt_.beamSizeToken, idx.end(), [&t, &N, &emissions](const size_t& l, const size_t& r) { return emissions[t * N + l] > emissions[t * N + r]; }); } candidatesReset(candidatesBestScore_, candidates_, candidatePtrs_); for (const LexiconFreeDecoderState& prevHyp : hyp_[startFrame + t]) { const int prevIdx = prevHyp.token; for (int r = 0; r < std::min(opt_.beamSizeToken, N); ++r) { int n = idx[r]; double amScore = emissions[t * N + n]; if (nDecodedFrames_ + t > 0 && opt_.criterionType == CriterionType::ASG) { amScore += transitions_[n * N + prevIdx]; } double score = prevHyp.score + emissions[t * N + n]; if (n == sil_) { score += opt_.silScore; } if ((opt_.criterionType == CriterionType::ASG && n != prevIdx) || (opt_.criterionType == CriterionType::CTC && n != blank_ && (n != prevIdx || prevHyp.prevBlank))) { auto lmStateScorePair = lm_->score(prevHyp.lmState, n); auto lmScore = lmStateScorePair.second; candidatesAdd( candidates_, candidatesBestScore_, opt_.beamThreshold, score + opt_.lmWeight * lmScore, lmStateScorePair.first, &prevHyp, n, false, // prevBlank prevHyp.amScore + amScore, prevHyp.lmScore + lmScore); } else if (opt_.criterionType == CriterionType::CTC && n == blank_) { candidatesAdd( candidates_, candidatesBestScore_, opt_.beamThreshold, score, prevHyp.lmState, &prevHyp, n, true, // prevBlank prevHyp.amScore + amScore, prevHyp.lmScore); } else { candidatesAdd( candidates_, candidatesBestScore_, opt_.beamThreshold, score, prevHyp.lmState, &prevHyp, n, false, // prevBlank prevHyp.amScore + amScore, prevHyp.lmScore); } } } candidatesStore( candidates_, candidatePtrs_, hyp_[startFrame + t + 1], opt_.beamSize, candidatesBestScore_ - opt_.beamThreshold, opt_.logAdd, false); updateLMCache(lm_, hyp_[startFrame + t + 1]); } nDecodedFrames_ += T; } void LexiconFreeDecoder::decodeEnd() { candidatesReset(candidatesBestScore_, candidates_, candidatePtrs_); for (const LexiconFreeDecoderState& prevHyp : hyp_[nDecodedFrames_ - nPrunedFrames_]) { const LMStatePtr& prevLmState = prevHyp.lmState; auto lmStateScorePair = lm_->finish(prevLmState); auto lmScore = lmStateScorePair.second; candidatesAdd( candidates_, candidatesBestScore_, opt_.beamThreshold, prevHyp.score + opt_.lmWeight * lmScore, lmStateScorePair.first, &prevHyp, sil_, false, // prevBlank prevHyp.amScore, prevHyp.lmScore + lmScore); } candidatesStore( candidates_, candidatePtrs_, 
hyp_[nDecodedFrames_ - nPrunedFrames_ + 1], opt_.beamSize, candidatesBestScore_ - opt_.beamThreshold, opt_.logAdd, true); ++nDecodedFrames_; } std::vector<DecodeResult> LexiconFreeDecoder::getAllFinalHypothesis() const { int finalFrame = nDecodedFrames_ - nPrunedFrames_; return getAllHypothesis(hyp_.find(finalFrame)->second, finalFrame); } DecodeResult LexiconFreeDecoder::getBestHypothesis(int lookBack) const { int finalFrame = nDecodedFrames_ - nPrunedFrames_; const LexiconFreeDecoderState* bestNode = findBestAncestor(hyp_.find(finalFrame)->second, lookBack); return getHypothesis(bestNode, nDecodedFrames_ - nPrunedFrames_ - lookBack); } int LexiconFreeDecoder::nHypothesis() const { int finalFrame = nDecodedFrames_ - nPrunedFrames_; return hyp_.find(finalFrame)->second.size(); } int LexiconFreeDecoder::nDecodedFramesInBuffer() const { return nDecodedFrames_ - nPrunedFrames_ + 1; } void LexiconFreeDecoder::prune(int lookBack) { if (nDecodedFrames_ - nPrunedFrames_ - lookBack < 1) { return; // Not enough decoded frames to prune } /* (1) Find the last emitted word in the best path */ int finalFrame = nDecodedFrames_ - nPrunedFrames_; const LexiconFreeDecoderState* bestNode = findBestAncestor(hyp_.find(finalFrame)->second, lookBack); if (!bestNode) { return; // Not enough decoded frames to prune } int startFrame = nDecodedFrames_ - nPrunedFrames_ - lookBack; if (startFrame < 1) { return; // Not enough decoded frames to prune } /* (2) Move things from back of hyp_ to front and normalize scores */ pruneAndNormalize(hyp_, startFrame, lookBack); nPrunedFrames_ = nDecodedFrames_ - lookBack; } } // namespace text } // namespace lib } // namespace fl
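For orientation, the per-frame token pre-selection in `decodeStep` (the `std::iota` plus `std::partial_sort` over emission scores, keeping the `beamSizeToken` best tokens) corresponds to the following Python sketch; it illustrates the pruning idea only, is not a binding to the flashlight API, and the function name is invented.

```python
import numpy as np

def top_tokens_per_frame(emissions, beam_size_token):
    """Indices of the highest-scoring tokens per frame, best first.

    emissions: (T, N) array of per-frame token scores, as in decodeStep.
    Only these tokens are used to extend hypotheses, which bounds the
    per-frame work at beam_size_token instead of N candidates.
    """
    T, N = emissions.shape
    k = min(beam_size_token, N)
    # argpartition isolates the k largest scores in O(N) per frame...
    part = np.argpartition(-emissions, k - 1, axis=1)[:, :k]
    # ...then only those k are fully sorted, mirroring partial_sort.
    order = np.argsort(-np.take_along_axis(emissions, part, axis=1), axis=1)
    return np.take_along_axis(part, order, axis=1)
```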
0
coqui_public_repos
coqui_public_repos/Trainer/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at coc-report@coqui.ai. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. 
No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at [https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations
0
coqui_public_repos
coqui_public_repos/Trainer/.pylintrc
[MAIN] # Analyse import fallback blocks. This can be used to support both Python 2 and # 3 compatible code, which means that the block might have code that exists # only in one or another interpreter, leading to false positives when analysed. analyse-fallback-blocks=no # Clear in-memory caches upon conclusion of linting. Useful if running pylint # in a server-like mode. clear-cache-post-run=no # Load and enable all available extensions. Use --list-extensions to see a list # all available extensions. #enable-all-extensions= # In error mode, messages with a category besides ERROR or FATAL are # suppressed, and no reports are done by default. Error mode is compatible with # disabling specific errors. #errors-only= # Always return a 0 (non-error) status code, even if lint errors are found. # This is primarily useful in continuous integration scripts. #exit-zero= # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code. extension-pkg-allow-list= # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code. (This is an alternative name to extension-pkg-allow-list # for backward compatibility.) extension-pkg-whitelist= # Return non-zero exit code if any of these messages/categories are detected, # even if score is above --fail-under value. Syntax same as enable. Messages # specified are enabled, while categories only check already-enabled messages. fail-on= # Specify a score threshold under which the program will exit with error. fail-under=10 # Interpret the stdin as a python script, whose filename needs to be passed as # the module_or_package argument. #from-stdin= # Files or directories to be skipped. They should be base names, not paths. ignore=CVS # Add files or directories matching the regular expressions patterns to the # ignore-list. The regex matches against paths and can be in Posix or Windows # format. Because '\\' represents the directory delimiter on Windows systems, # it can't be used as an escape character. ignore-paths= # Files or directories matching the regular expression patterns are skipped. # The regex matches against base names, not paths. The default value ignores # Emacs file locks ignore-patterns=^\.# # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis). It # supports qualified module names, as well as Unix pattern matching. ignored-modules= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the # number of processors available to use, and will cap the count on Windows to # avoid hangs. jobs=1 # Control the amount of potential inferred values when inferring a single # object. This can help the performance when dealing with large functions or # complex, nested conditions. limit-inference-results=100 # List of plugins (as comma separated values of python module names) to load, # usually to register additional checkers. load-plugins= # Pickle collected data for later comparisons. persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. 
py-version=3.11 # Discover python modules and packages in the file system subtree. recursive=no # Add paths to the list of the source roots. Supports globbing patterns. The # source root is an absolute path or a path relative to the current working # directory used to determine a package namespace for modules located under the # source root. source-roots= # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no # In verbose mode, extra non-checker-related info will be displayed. #verbose= [BASIC] # Naming style matching correct argument names. argument-naming-style=snake_case # Regular expression matching correct argument names. Overrides argument- # naming-style. If left empty, argument names will be checked with the set # naming style. #argument-rgx= # Naming style matching correct attribute names. attr-naming-style=snake_case # Regular expression matching correct attribute names. Overrides attr-naming- # style. If left empty, attribute names will be checked with the set naming # style. #attr-rgx= # Bad variable names which should always be refused, separated by a comma. bad-names=foo, bar, baz, toto, tutu, tata # Bad variable names regexes, separated by a comma. If names match any regex, # they will always be refused bad-names-rgxs= # Naming style matching correct class attribute names. class-attribute-naming-style=any # Regular expression matching correct class attribute names. Overrides class- # attribute-naming-style. If left empty, class attribute names will be checked # with the set naming style. #class-attribute-rgx= # Naming style matching correct class constant names. class-const-naming-style=UPPER_CASE # Regular expression matching correct class constant names. Overrides class- # const-naming-style. If left empty, class constant names will be checked with # the set naming style. #class-const-rgx= # Naming style matching correct class names. class-naming-style=PascalCase # Regular expression matching correct class names. Overrides class-naming- # style. If left empty, class names will be checked with the set naming style. #class-rgx= # Naming style matching correct constant names. const-naming-style=UPPER_CASE # Regular expression matching correct constant names. Overrides const-naming- # style. If left empty, constant names will be checked with the set naming # style. #const-rgx= # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=-1 # Naming style matching correct function names. function-naming-style=snake_case # Regular expression matching correct function names. Overrides function- # naming-style. If left empty, function names will be checked with the set # naming style. #function-rgx= # Good variable names which should always be accepted, separated by a comma. good-names=i, j, k, ex, Run, _ # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted good-names-rgxs= # Include a hint for the correct naming format with invalid-name. include-naming-hint=no # Naming style matching correct inline iteration names. inlinevar-naming-style=any # Regular expression matching correct inline iteration names. Overrides # inlinevar-naming-style. 
If left empty, inline iteration names will be checked # with the set naming style. #inlinevar-rgx= # Naming style matching correct method names. method-naming-style=snake_case # Regular expression matching correct method names. Overrides method-naming- # style. If left empty, method names will be checked with the set naming style. #method-rgx= # Naming style matching correct module names. module-naming-style=snake_case # Regular expression matching correct module names. Overrides module-naming- # style. If left empty, module names will be checked with the set naming style. #module-rgx= # Colon-delimited sets of names that determine each other's naming style when # the name regexes allow several styles. name-group= # Regular expression which should only match function or class names that do # not require a docstring. no-docstring-rgx=^_ # List of decorators that produce properties, such as abc.abstractproperty. Add # to this list to register other decorators that produce valid properties. # These decorators are taken in consideration only for invalid-name. property-classes=abc.abstractproperty # Regular expression matching correct type alias names. If left empty, type # alias names will be checked with the set naming style. #typealias-rgx= # Regular expression matching correct type variable names. If left empty, type # variable names will be checked with the set naming style. #typevar-rgx= # Naming style matching correct variable names. variable-naming-style=snake_case # Regular expression matching correct variable names. Overrides variable- # naming-style. If left empty, variable names will be checked with the set # naming style. #variable-rgx= [CLASSES] # Warn about protected attribute access inside special methods check-protected-access-in-special-methods=no # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__, __new__, setUp, asyncSetUp, __post_init__ # List of member names, which should be excluded from the protected access # warning. exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls # List of valid names for the first argument in a metaclass class method. valid-metaclass-classmethod-first-arg=mcs [DESIGN] # List of regular expressions of class ancestor names to ignore when counting # public methods (see R0903) exclude-too-few-public-methods= # List of qualified class names to ignore when counting class parents (see # R0901) ignored-parents= # Maximum number of arguments for function / method. max-args=5 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Maximum number of boolean expressions in an if statement (see R0916). max-bool-expr=5 # Maximum number of branch for function / method body. max-branches=12 # Maximum number of locals for function / method body. max-locals=15 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of public methods for a class (see R0904). max-public-methods=20 # Maximum number of return / yield for function / method body. max-returns=6 # Maximum number of statements in function / method body. max-statements=50 # Minimum number of public methods for a class (see R0903). min-public-methods=2 [EXCEPTIONS] # Exceptions that will emit a warning when caught. overgeneral-exceptions=builtins.BaseException,builtins.Exception [FORMAT] # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 
expected-line-ending-format= # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )?<?https?://\S+>?$ # Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Maximum number of characters on a single line. max-line-length=100 # Maximum number of lines in a module. max-module-lines=1000 # Allow the body of a class to be on the same line as the declaration if body # contains single statement. single-line-class-stmt=no # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no [IMPORTS] # List of modules that can be imported at any level, not just the top level # one. allow-any-import-level= # Allow explicit reexports by alias from a package __init__. allow-reexport-from-package=no # Allow wildcard imports from modules that define __all__. allow-wildcard-with-all=no # Deprecated modules which should not be used, separated by a comma. deprecated-modules= # Output a graph (.gv or any supported image format) of external dependencies # to the given file (report RP0402 must not be disabled). ext-import-graph= # Output a graph (.gv or any supported image format) of all (i.e. internal and # external) dependencies to the given file (report RP0402 must not be # disabled). import-graph= # Output a graph (.gv or any supported image format) of internal dependencies # to the given file (report RP0402 must not be disabled). int-import-graph= # Force import order to recognize a module as part of the standard # compatibility libraries. known-standard-library= # Force import order to recognize a module as part of a third party library. known-third-party=enchant # Couples of modules and preferred modules, separated by a comma. preferred-modules= [LOGGING] # The type of string formatting that logging methods do. `old` means using % # formatting, `new` is for `{}` formatting. logging-format-style=old # Logging modules to check that the string format arguments are in logging # function parameter format. logging-modules=logging [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, # UNDEFINED. confidence=HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once). You can also use "--disable=all" to # disable everything first and then re-enable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W". 
disable=raw-checker-failed, bad-inline-option, locally-disabled, file-ignored, suppressed-message, useless-suppression, deprecated-pragma, use-symbolic-message-instead, line-too-long, missing-function-docstring, missing-module-docstring, missing-class-docstring, invalid-name, consider-using-f-string, too-many-instance-attributes, no-member, too-many-locals, too-many-branches, too-many-arguments, fixme, too-many-lines, too-many-statements, too-many-public-methods, duplicate-code, # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. enable=c-extension-no-member [METHOD_ARGS] # List of qualified names (i.e., library.method) which require a timeout # parameter e.g. 'requests.api.get,requests.api.post' timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME, XXX, TODO # Regular expression of note tags to take in consideration. notes-rgx= [REFACTORING] # Maximum number of nested blocks for function / method body max-nested-blocks=5 # Complete name of functions that never returns. When checking for # inconsistent-return-statements if a never returning function is called then # it will be considered as an explicit return statement and no message will be # printed. never-returning-functions=sys.exit,argparse.parse_error [REPORTS] # Python expression which should return a score less than or equal to 10. You # have access to the variables 'fatal', 'error', 'warning', 'refactor', # 'convention', and 'info' which contain the number of messages in each # category, as well as 'statement' which is the total number of statements # analyzed. This score is used by the global evaluation report (RP0004). evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details. msg-template= # Set the output format. Available formats are text, parseable, colorized, json # and msvs (visual studio). You can also give a reporter class, e.g. # mypackage.mymodule.MyReporterClass. #output-format= # Tells whether to display a full report or only the messages. reports=no # Activate the evaluation score. score=yes [SIMILARITIES] # Comments are removed from the similarity computation ignore-comments=yes # Docstrings are removed from the similarity computation ignore-docstrings=yes # Imports are removed from the similarity computation ignore-imports=yes # Signatures are removed from the similarity computation ignore-signatures=yes # Minimum lines number of a similarity. min-similarity-lines=4 [SPELLING] # Limits count of emitted suggestions for spelling mistakes. max-spelling-suggestions=4 # Spelling dictionary name. No available dictionaries : You need to install # both the python package and the system dependency for enchant to work.. spelling-dict= # List of comma separated words that should be considered directives if they # appear at the beginning of a comment and should not be checked. 
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: # List of comma separated words that should not be checked. spelling-ignore-words= # A path to a file that contains the private dictionary; one word per line. spelling-private-dict-file= # Tells whether to store unknown words to the private dictionary (see the # --spelling-private-dict-file option) instead of raising a message. spelling-store-unknown-words=no [STRING] # This flag controls whether inconsistent-quotes generates a warning when the # character used as a quote delimiter is used inconsistently within a module. check-quote-consistency=no # This flag controls whether the implicit-str-concat should generate a warning # on implicit string concatenation in sequences defined over several lines. check-str-concat-over-line-jumps=no [TYPECHECK] # List of decorators that produce context managers, such as # contextlib.contextmanager. Add to this list to register other decorators that # produce valid context managers. contextmanager-decorators=contextlib.contextmanager # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. generated-members= # Tells whether to warn about missing members when the owner of the attribute # is inferred to be None. ignore-none=yes # This flag controls whether pylint should warn about no-member and similar # checks whenever an opaque object is returned when inferring. The inference # can return multiple potential results while evaluating a Python object, but # some branches might not be evaluated, which results in partial inference. In # that case, it might be useful to still emit no-member and other checks for # the rest of the inferred objects. ignore-on-opaque-inference=yes # List of symbolic message names to ignore for Mixin members. ignored-checks-for-mixins=no-member, not-async-context-manager, not-context-manager, attribute-defined-outside-init # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of # qualified names. ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace # Show a hint with possible names when a member name was not found. The aspect # of finding the hint is based on edit distance. missing-member-hint=yes # The minimum edit distance a name should have in order to be considered a # similar match for a missing member name. missing-member-hint-distance=1 # The total number of similar names that should be taken in consideration when # showing a hint for a missing member. missing-member-max-choices=1 # Regex pattern to define which classes are considered mixins. mixin-class-rgx=.*[Mm]ixin # List of decorators that change the signature of a decorated function. signature-mutators= [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid defining new builtins when possible. additional-builtins= # Tells whether unused global variables should be treated as a violation. allow-global-unused-variables=yes # List of names allowed to shadow builtins allowed-redefined-builtins= # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_, _cb # A regular expression matching the name of dummy variables (i.e. expected to # not be used). 
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ # Argument names that match this expression will be ignored. ignored-argument-names=_.*|^ignored_|^unused_ # Tells whether we should check for unused import in __init__ files. init-import=no # List of qualified module names which can have objects that can redefine # builtins. redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
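As a usage note (not part of the rcfile itself): pylint picks this configuration up automatically when run from the repository root, or it can be passed explicitly. A minimal programmatic sketch, assuming a placeholder package name `trainer`:

```python
# Equivalent to running `pylint --rcfile=.pylintrc trainer` from a shell.
# Note: Run() calls sys.exit() with pylint's status code when it finishes.
from pylint.lint import Run

Run(["--rcfile=.pylintrc", "trainer"])
```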
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include
coqui_public_repos/STT/native_client/ctcdecode/third_party/openfst-1.6.7/src/include/fst/float-weight.h
// See www.openfst.org for extensive documentation on this weighted // finite-state transducer library. // // Float weight set and associated semiring operation definitions. #ifndef FST_FLOAT_WEIGHT_H_ #define FST_FLOAT_WEIGHT_H_ #include <climits> #include <cmath> #include <cstdlib> #include <cstring> #include <algorithm> #include <limits> #include <sstream> #include <string> #include <fst/util.h> #include <fst/weight.h> namespace fst { // Numeric limits class. template <class T> class FloatLimits { public: static constexpr T PosInfinity() { return std::numeric_limits<T>::infinity(); } static constexpr T NegInfinity() { return -PosInfinity(); } static constexpr T NumberBad() { return std::numeric_limits<T>::quiet_NaN(); } }; // Weight class to be templated on floating-points types. template <class T = float> class FloatWeightTpl { public: using ValueType = T; FloatWeightTpl() {} FloatWeightTpl(T f) : value_(f) {} FloatWeightTpl(const FloatWeightTpl<T> &weight) : value_(weight.value_) {} FloatWeightTpl<T> &operator=(const FloatWeightTpl<T> &weight) { value_ = weight.value_; return *this; } std::istream &Read(std::istream &strm) { return ReadType(strm, &value_); } std::ostream &Write(std::ostream &strm) const { return WriteType(strm, value_); } size_t Hash() const { size_t hash = 0; // Avoid using union, which would be undefined behavior. // Use memcpy, similar to bit_cast, but sizes may be different. // This should be optimized into a single move instruction by // any reasonable compiler. std::memcpy(&hash, &value_, std::min(sizeof(hash), sizeof(value_))); return hash; } const T &Value() const { return value_; } protected: void SetValue(const T &f) { value_ = f; } static constexpr const char *GetPrecisionString() { return sizeof(T) == 4 ? "" : sizeof(T) == 1 ? "8" : sizeof(T) == 2 ? "16" : sizeof(T) == 8 ? "64" : "unknown"; } private: T value_; }; // Single-precision float weight. using FloatWeight = FloatWeightTpl<float>; template <class T> inline bool operator==(const FloatWeightTpl<T> &w1, const FloatWeightTpl<T> &w2) { // Volatile qualifier thwarts over-aggressive compiler optimizations that // lead to problems esp. with NaturalLess(). volatile T v1 = w1.Value(); volatile T v2 = w2.Value(); return v1 == v2; } // These seemingly unnecessary overloads are actually needed to make // comparisons like FloatWeightTpl<float> == float compile. If only the // templated version exists, the FloatWeightTpl<float>(float) conversion // won't be found. 
inline bool operator==(const FloatWeightTpl<float> &w1, const FloatWeightTpl<float> &w2) { return operator==<float>(w1, w2); } inline bool operator==(const FloatWeightTpl<double> &w1, const FloatWeightTpl<double> &w2) { return operator==<double>(w1, w2); } template <class T> inline bool operator!=(const FloatWeightTpl<T> &w1, const FloatWeightTpl<T> &w2) { return !(w1 == w2); } inline bool operator!=(const FloatWeightTpl<float> &w1, const FloatWeightTpl<float> &w2) { return operator!=<float>(w1, w2); } inline bool operator!=(const FloatWeightTpl<double> &w1, const FloatWeightTpl<double> &w2) { return operator!=<double>(w1, w2); } template <class T> inline bool ApproxEqual(const FloatWeightTpl<T> &w1, const FloatWeightTpl<T> &w2, float delta = kDelta) { return w1.Value() <= w2.Value() + delta && w2.Value() <= w1.Value() + delta; } template <class T> inline std::ostream &operator<<(std::ostream &strm, const FloatWeightTpl<T> &w) { if (w.Value() == FloatLimits<T>::PosInfinity()) { return strm << "Infinity"; } else if (w.Value() == FloatLimits<T>::NegInfinity()) { return strm << "-Infinity"; } else if (w.Value() != w.Value()) { // Fails for IEEE NaN. return strm << "BadNumber"; } else { return strm << w.Value(); } } template <class T> inline std::istream &operator>>(std::istream &strm, FloatWeightTpl<T> &w) { string s; strm >> s; if (s == "Infinity") { w = FloatWeightTpl<T>(FloatLimits<T>::PosInfinity()); } else if (s == "-Infinity") { w = FloatWeightTpl<T>(FloatLimits<T>::NegInfinity()); } else { char *p; T f = strtod(s.c_str(), &p); if (p < s.c_str() + s.size()) { strm.clear(std::ios::badbit); } else { w = FloatWeightTpl<T>(f); } } return strm; } // Tropical semiring: (min, +, inf, 0). template <class T> class TropicalWeightTpl : public FloatWeightTpl<T> { public: using typename FloatWeightTpl<T>::ValueType; using FloatWeightTpl<T>::Value; using ReverseWeight = TropicalWeightTpl<T>; using Limits = FloatLimits<T>; constexpr TropicalWeightTpl() : FloatWeightTpl<T>() {} constexpr TropicalWeightTpl(T f) : FloatWeightTpl<T>(f) {} constexpr TropicalWeightTpl(const TropicalWeightTpl<T> &weight) : FloatWeightTpl<T>(weight) {} static const TropicalWeightTpl<T> &Zero() { static const TropicalWeightTpl zero(Limits::PosInfinity()); return zero; } static const TropicalWeightTpl<T> &One() { static const TropicalWeightTpl one(0.0F); return one; } static const TropicalWeightTpl<T> &NoWeight() { static const TropicalWeightTpl no_weight(Limits::NumberBad()); return no_weight; } static const string &Type() { static const string *const type = new string(string("tropical") + FloatWeightTpl<T>::GetPrecisionString()); return *type; } bool Member() const { // First part fails for IEEE NaN. return Value() == Value() && Value() != Limits::NegInfinity(); } TropicalWeightTpl<T> Quantize(float delta = kDelta) const { if (!Member() || Value() == Limits::PosInfinity()) { return *this; } else { return TropicalWeightTpl<T>(floor(Value() / delta + 0.5F) * delta); } } TropicalWeightTpl<T> Reverse() const { return *this; } static constexpr uint64 Properties() { return kLeftSemiring | kRightSemiring | kCommutative | kPath | kIdempotent; } }; // Single precision tropical weight. using TropicalWeight = TropicalWeightTpl<float>; template <class T> inline TropicalWeightTpl<T> Plus(const TropicalWeightTpl<T> &w1, const TropicalWeightTpl<T> &w2) { if (!w1.Member() || !w2.Member()) return TropicalWeightTpl<T>::NoWeight(); return w1.Value() < w2.Value() ? 
w1 : w2; } // See comment at operator==(FloatWeightTpl<float>, FloatWeightTpl<float>) // for why these overloads are present. inline TropicalWeightTpl<float> Plus(const TropicalWeightTpl<float> &w1, const TropicalWeightTpl<float> &w2) { return Plus<float>(w1, w2); } inline TropicalWeightTpl<double> Plus(const TropicalWeightTpl<double> &w1, const TropicalWeightTpl<double> &w2) { return Plus<double>(w1, w2); } template <class T> inline TropicalWeightTpl<T> Times(const TropicalWeightTpl<T> &w1, const TropicalWeightTpl<T> &w2) { using Limits = FloatLimits<T>; if (!w1.Member() || !w2.Member()) return TropicalWeightTpl<T>::NoWeight(); const T f1 = w1.Value(); const T f2 = w2.Value(); if (f1 == Limits::PosInfinity()) { return w1; } else if (f2 == Limits::PosInfinity()) { return w2; } else { return TropicalWeightTpl<T>(f1 + f2); } } inline TropicalWeightTpl<float> Times(const TropicalWeightTpl<float> &w1, const TropicalWeightTpl<float> &w2) { return Times<float>(w1, w2); } inline TropicalWeightTpl<double> Times(const TropicalWeightTpl<double> &w1, const TropicalWeightTpl<double> &w2) { return Times<double>(w1, w2); } template <class T> inline TropicalWeightTpl<T> Divide(const TropicalWeightTpl<T> &w1, const TropicalWeightTpl<T> &w2, DivideType typ = DIVIDE_ANY) { using Limits = FloatLimits<T>; if (!w1.Member() || !w2.Member()) return TropicalWeightTpl<T>::NoWeight(); const T f1 = w1.Value(); const T f2 = w2.Value(); if (f2 == Limits::PosInfinity()) { return Limits::NumberBad(); } else if (f1 == Limits::PosInfinity()) { return Limits::PosInfinity(); } else { return TropicalWeightTpl<T>(f1 - f2); } } inline TropicalWeightTpl<float> Divide(const TropicalWeightTpl<float> &w1, const TropicalWeightTpl<float> &w2, DivideType typ = DIVIDE_ANY) { return Divide<float>(w1, w2, typ); } inline TropicalWeightTpl<double> Divide(const TropicalWeightTpl<double> &w1, const TropicalWeightTpl<double> &w2, DivideType typ = DIVIDE_ANY) { return Divide<double>(w1, w2, typ); } template <class T, class V> inline TropicalWeightTpl<T> Power(const TropicalWeightTpl<T> &weight, V n) { if (n == 0) { return TropicalWeightTpl<T>::One(); } else if (weight == TropicalWeightTpl<T>::Zero()) { return TropicalWeightTpl<T>::Zero(); } return TropicalWeightTpl<T>(weight.Value() * n); } // Specializes the library-wide template to use the above implementation; rules // of function template instantiation require this be a full instantiation. template <> inline TropicalWeightTpl<float> Power<TropicalWeightTpl<float>>( const TropicalWeightTpl<float> &weight, size_t n) { return Power<float, size_t>(weight, n); } template <> inline TropicalWeightTpl<double> Power<TropicalWeightTpl<double>>( const TropicalWeightTpl<double> &weight, size_t n) { return Power<double, size_t>(weight, n); } // Log semiring: (log(e^-x + e^-y), +, inf, 0). 
template <class T> class LogWeightTpl : public FloatWeightTpl<T> { public: using typename FloatWeightTpl<T>::ValueType; using FloatWeightTpl<T>::Value; using ReverseWeight = LogWeightTpl; using Limits = FloatLimits<T>; constexpr LogWeightTpl() : FloatWeightTpl<T>() {} constexpr LogWeightTpl(T f) : FloatWeightTpl<T>(f) {} constexpr LogWeightTpl(const LogWeightTpl<T> &weight) : FloatWeightTpl<T>(weight) {} static const LogWeightTpl &Zero() { static const LogWeightTpl zero(Limits::PosInfinity()); return zero; } static const LogWeightTpl &One() { static const LogWeightTpl one(0.0F); return one; } static const LogWeightTpl &NoWeight() { static const LogWeightTpl no_weight(Limits::NumberBad()); return no_weight; } static const string &Type() { static const string *const type = new string(string("log") + FloatWeightTpl<T>::GetPrecisionString()); return *type; } bool Member() const { // First part fails for IEEE NaN. return Value() == Value() && Value() != Limits::NegInfinity(); } LogWeightTpl<T> Quantize(float delta = kDelta) const { if (!Member() || Value() == Limits::PosInfinity()) { return *this; } else { return LogWeightTpl<T>(floor(Value() / delta + 0.5F) * delta); } } LogWeightTpl<T> Reverse() const { return *this; } static constexpr uint64 Properties() { return kLeftSemiring | kRightSemiring | kCommutative; } }; // Single-precision log weight. using LogWeight = LogWeightTpl<float>; // Double-precision log weight. using Log64Weight = LogWeightTpl<double>; namespace internal { // -log(e^-x + e^-y) = x - LogPosExp(y - x), assuming x >= 0.0. inline double LogPosExp(double x) { DCHECK(!(x < 0)); // NB: NaN values are allowed. return log1p(exp(-x)); } // -log(e^-x - e^-y) = x - LogNegExp(y - x), assuming x > 0.0. inline double LogNegExp(double x) { DCHECK_GT(x, 0); return log1p(-exp(-x)); } // a +_log b = -log(e^-a + e^-b) = KahanLogSum(a, b, ...). // Kahan compensated summation provides an error bound that is // independent of the number of addends. Assumes b >= a; // c is the compensation. inline double KahanLogSum(double a, double b, double *c) { DCHECK_GE(b, a); double y = -LogPosExp(b - a) - *c; double t = a + y; *c = (t - a) - y; return t; } // a -_log b = -log(e^-a - e^-b) = KahanLogDiff(a, b, ...). // Kahan compensated summation provides an error bound that is // independent of the number of addends. Assumes b > a; // c is the compensation. 
inline double KahanLogDiff(double a, double b, double *c) { DCHECK_GT(b, a); double y = -LogNegExp(b - a) - *c; double t = a + y; *c = (t - a) - y; return t; } } // namespace internal template <class T> inline LogWeightTpl<T> Plus(const LogWeightTpl<T> &w1, const LogWeightTpl<T> &w2) { using Limits = FloatLimits<T>; const T f1 = w1.Value(); const T f2 = w2.Value(); if (f1 == Limits::PosInfinity()) { return w2; } else if (f2 == Limits::PosInfinity()) { return w1; } else if (f1 > f2) { return LogWeightTpl<T>(f2 - internal::LogPosExp(f1 - f2)); } else { return LogWeightTpl<T>(f1 - internal::LogPosExp(f2 - f1)); } } inline LogWeightTpl<float> Plus(const LogWeightTpl<float> &w1, const LogWeightTpl<float> &w2) { return Plus<float>(w1, w2); } inline LogWeightTpl<double> Plus(const LogWeightTpl<double> &w1, const LogWeightTpl<double> &w2) { return Plus<double>(w1, w2); } template <class T> inline LogWeightTpl<T> Times(const LogWeightTpl<T> &w1, const LogWeightTpl<T> &w2) { using Limits = FloatLimits<T>; if (!w1.Member() || !w2.Member()) return LogWeightTpl<T>::NoWeight(); const T f1 = w1.Value(); const T f2 = w2.Value(); if (f1 == Limits::PosInfinity()) { return w1; } else if (f2 == Limits::PosInfinity()) { return w2; } else { return LogWeightTpl<T>(f1 + f2); } } inline LogWeightTpl<float> Times(const LogWeightTpl<float> &w1, const LogWeightTpl<float> &w2) { return Times<float>(w1, w2); } inline LogWeightTpl<double> Times(const LogWeightTpl<double> &w1, const LogWeightTpl<double> &w2) { return Times<double>(w1, w2); } template <class T> inline LogWeightTpl<T> Divide(const LogWeightTpl<T> &w1, const LogWeightTpl<T> &w2, DivideType typ = DIVIDE_ANY) { using Limits = FloatLimits<T>; if (!w1.Member() || !w2.Member()) return LogWeightTpl<T>::NoWeight(); const T f1 = w1.Value(); const T f2 = w2.Value(); if (f2 == Limits::PosInfinity()) { return Limits::NumberBad(); } else if (f1 == Limits::PosInfinity()) { return Limits::PosInfinity(); } else { return LogWeightTpl<T>(f1 - f2); } } inline LogWeightTpl<float> Divide(const LogWeightTpl<float> &w1, const LogWeightTpl<float> &w2, DivideType typ = DIVIDE_ANY) { return Divide<float>(w1, w2, typ); } inline LogWeightTpl<double> Divide(const LogWeightTpl<double> &w1, const LogWeightTpl<double> &w2, DivideType typ = DIVIDE_ANY) { return Divide<double>(w1, w2, typ); } template <class T, class V> inline LogWeightTpl<T> Power(const LogWeightTpl<T> &weight, V n) { if (n == 0) { return LogWeightTpl<T>::One(); } else if (weight == LogWeightTpl<T>::Zero()) { return LogWeightTpl<T>::Zero(); } return LogWeightTpl<T>(weight.Value() * n); } // Specializes the library-wide template to use the above implementation; rules // of function template instantiation require this be a full instantiation. template <> inline LogWeightTpl<float> Power<LogWeightTpl<float>>( const LogWeightTpl<float> &weight, size_t n) { return Power<float, size_t>(weight, n); } template <> inline LogWeightTpl<double> Power<LogWeightTpl<double>>( const LogWeightTpl<double> &weight, size_t n) { return Power<double, size_t>(weight, n); } // Specialization using the Kahan compensated summation. 
template <class T> class Adder<LogWeightTpl<T>> { public: using Weight = LogWeightTpl<T>; explicit Adder(Weight w = Weight::Zero()) : sum_(w.Value()), c_(0.0) { } Weight Add(const Weight &w) { using Limits = FloatLimits<T>; const T f = w.Value(); if (f == Limits::PosInfinity()) { return Sum(); } else if (sum_ == Limits::PosInfinity()) { sum_ = f; c_ = 0.0; } else if (f > sum_) { sum_ = internal::KahanLogSum(sum_, f, &c_); } else { sum_ = internal::KahanLogSum(f, sum_, &c_); } return Sum(); } Weight Sum() { return Weight(sum_); } void Reset(Weight w = Weight::Zero()) { sum_ = w.Value(); c_ = 0.0; } private: double sum_; double c_; // Kahan compensation. }; // MinMax semiring: (min, max, inf, -inf). template <class T> class MinMaxWeightTpl : public FloatWeightTpl<T> { public: using typename FloatWeightTpl<T>::ValueType; using FloatWeightTpl<T>::Value; using ReverseWeight = MinMaxWeightTpl<T>; using Limits = FloatLimits<T>; MinMaxWeightTpl() : FloatWeightTpl<T>() {} MinMaxWeightTpl(T f) : FloatWeightTpl<T>(f) {} MinMaxWeightTpl(const MinMaxWeightTpl<T> &weight) : FloatWeightTpl<T>(weight) {} static const MinMaxWeightTpl &Zero() { static const MinMaxWeightTpl zero(Limits::PosInfinity()); return zero; } static const MinMaxWeightTpl &One() { static const MinMaxWeightTpl one(Limits::NegInfinity()); return one; } static const MinMaxWeightTpl &NoWeight() { static const MinMaxWeightTpl no_weight(Limits::NumberBad()); return no_weight; } static const string &Type() { static const string *const type = new string(string("minmax") + FloatWeightTpl<T>::GetPrecisionString()); return *type; } // Fails for IEEE NaN. bool Member() const { return Value() == Value(); } MinMaxWeightTpl<T> Quantize(float delta = kDelta) const { // If one of infinities, or a NaN. if (!Member() || Value() == Limits::NegInfinity() || Value() == Limits::PosInfinity()) { return *this; } else { return MinMaxWeightTpl<T>(floor(Value() / delta + 0.5F) * delta); } } MinMaxWeightTpl<T> Reverse() const { return *this; } static constexpr uint64 Properties() { return kLeftSemiring | kRightSemiring | kCommutative | kIdempotent | kPath; } }; // Single-precision min-max weight. using MinMaxWeight = MinMaxWeightTpl<float>; // Min. template <class T> inline MinMaxWeightTpl<T> Plus(const MinMaxWeightTpl<T> &w1, const MinMaxWeightTpl<T> &w2) { if (!w1.Member() || !w2.Member()) return MinMaxWeightTpl<T>::NoWeight(); return w1.Value() < w2.Value() ? w1 : w2; } inline MinMaxWeightTpl<float> Plus(const MinMaxWeightTpl<float> &w1, const MinMaxWeightTpl<float> &w2) { return Plus<float>(w1, w2); } inline MinMaxWeightTpl<double> Plus(const MinMaxWeightTpl<double> &w1, const MinMaxWeightTpl<double> &w2) { return Plus<double>(w1, w2); } // Max. template <class T> inline MinMaxWeightTpl<T> Times(const MinMaxWeightTpl<T> &w1, const MinMaxWeightTpl<T> &w2) { if (!w1.Member() || !w2.Member()) return MinMaxWeightTpl<T>::NoWeight(); return w1.Value() >= w2.Value() ? w1 : w2; } inline MinMaxWeightTpl<float> Times(const MinMaxWeightTpl<float> &w1, const MinMaxWeightTpl<float> &w2) { return Times<float>(w1, w2); } inline MinMaxWeightTpl<double> Times(const MinMaxWeightTpl<double> &w1, const MinMaxWeightTpl<double> &w2) { return Times<double>(w1, w2); } // Defined only for special cases. template <class T> inline MinMaxWeightTpl<T> Divide(const MinMaxWeightTpl<T> &w1, const MinMaxWeightTpl<T> &w2, DivideType typ = DIVIDE_ANY) { if (!w1.Member() || !w2.Member()) return MinMaxWeightTpl<T>::NoWeight(); // min(w1, x) = w2, w1 >= w2 => min(w1, x) = w2, x = w2. 
  return w1.Value() >= w2.Value() ? w1 : FloatLimits<T>::NumberBad();
}

inline MinMaxWeightTpl<float> Divide(const MinMaxWeightTpl<float> &w1,
                                     const MinMaxWeightTpl<float> &w2,
                                     DivideType typ = DIVIDE_ANY) {
  return Divide<float>(w1, w2, typ);
}

inline MinMaxWeightTpl<double> Divide(const MinMaxWeightTpl<double> &w1,
                                      const MinMaxWeightTpl<double> &w2,
                                      DivideType typ = DIVIDE_ANY) {
  return Divide<double>(w1, w2, typ);
}

// Converts to tropical.
template <>
struct WeightConvert<LogWeight, TropicalWeight> {
  TropicalWeight operator()(const LogWeight &w) const { return w.Value(); }
};

template <>
struct WeightConvert<Log64Weight, TropicalWeight> {
  TropicalWeight operator()(const Log64Weight &w) const { return w.Value(); }
};

// Converts to log.
template <>
struct WeightConvert<TropicalWeight, LogWeight> {
  LogWeight operator()(const TropicalWeight &w) const { return w.Value(); }
};

template <>
struct WeightConvert<Log64Weight, LogWeight> {
  LogWeight operator()(const Log64Weight &w) const { return w.Value(); }
};

// Converts to log64.
template <>
struct WeightConvert<TropicalWeight, Log64Weight> {
  Log64Weight operator()(const TropicalWeight &w) const { return w.Value(); }
};

template <>
struct WeightConvert<LogWeight, Log64Weight> {
  Log64Weight operator()(const LogWeight &w) const { return w.Value(); }
};

// This function object returns random integers chosen from [0,
// num_random_weights). The boolean 'allow_zero' determines whether Zero() and
// zero divisors should be returned in the random weight generation. This is
// intended primarily for testing.
template <class Weight>
class FloatWeightGenerate {
 public:
  explicit FloatWeightGenerate(
      bool allow_zero = true,
      const size_t num_random_weights = kNumRandomWeights)
      : allow_zero_(allow_zero), num_random_weights_(num_random_weights) {}

  Weight operator()() const {
    const int n = rand() % (num_random_weights_ + allow_zero_);  // NOLINT
    if (allow_zero_ && n == num_random_weights_) return Weight::Zero();
    return Weight(n);
  }

 private:
  // Permits Zero() and zero divisors.
  const bool allow_zero_;
  // Number of alternative random weights.
  const size_t num_random_weights_;
};

template <class T>
class WeightGenerate<TropicalWeightTpl<T>>
    : public FloatWeightGenerate<TropicalWeightTpl<T>> {
 public:
  using Weight = TropicalWeightTpl<T>;
  using Generate = FloatWeightGenerate<Weight>;

  explicit WeightGenerate(bool allow_zero = true,
                          size_t num_random_weights = kNumRandomWeights)
      : Generate(allow_zero, num_random_weights) {}

  Weight operator()() const { return Weight(Generate::operator()()); }
};

template <class T>
class WeightGenerate<LogWeightTpl<T>>
    : public FloatWeightGenerate<LogWeightTpl<T>> {
 public:
  using Weight = LogWeightTpl<T>;
  using Generate = FloatWeightGenerate<Weight>;

  explicit WeightGenerate(bool allow_zero = true,
                          size_t num_random_weights = kNumRandomWeights)
      : Generate(allow_zero, num_random_weights) {}

  Weight operator()() const { return Weight(Generate::operator()()); }
};

// This function object returns random integers chosen from
// [-num_random_weights, num_random_weights). The boolean 'allow_zero'
// determines whether Zero() and zero divisors should be returned in the
// random weight generation. This is intended primarily for testing.
template <class T> class WeightGenerate<MinMaxWeightTpl<T>> { public: using Weight = MinMaxWeightTpl<T>; explicit WeightGenerate(bool allow_zero = true, size_t num_random_weights = kNumRandomWeights) : allow_zero_(allow_zero), num_random_weights_(num_random_weights) {} Weight operator()() const { const int n = (rand() % // NOLINT (2 * num_random_weights_ + allow_zero_)) - num_random_weights_; if (allow_zero_ && n == num_random_weights_) { return Weight::Zero(); } else if (n == -num_random_weights_) { return Weight::One(); } else { return Weight(n); } } private: // Permits Zero() and zero divisors. const bool allow_zero_; // Number of alternative random weights. const size_t num_random_weights_; }; } // namespace fst #endif // FST_FLOAT_WEIGHT_H_
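// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the library): a minimal
// example of the log-semiring operations and the Kahan-compensated Adder
// defined above. The include path is an assumption about how this header is
// installed; the weight values are arbitrary.
#include <iostream>
#include <fst/float-weight.h>  // assumed install path for this header

int main() {
  const fst::LogWeight a(1.0), b(2.0);
  // Plus computes -log(exp(-1.0) + exp(-2.0)); Times is plain addition in
  // negative-log space, so Times(a, b) is 3.0. Both are found via ADL.
  std::cout << "Plus:  " << Plus(a, b) << std::endl;
  std::cout << "Times: " << Times(a, b) << std::endl;
  // The Adder specialization accumulates many log weights with reduced
  // floating-point error; for two terms it agrees with Plus(a, b).
  fst::Adder<fst::LogWeight> adder;
  adder.Add(a);
  adder.Add(b);
  std::cout << "Adder: " << adder.Sum() << std::endl;
  return 0;
}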
0
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/ctcdecode/ctc_beam_search_decoder.h
#ifndef CTC_BEAM_SEARCH_DECODER_H_
#define CTC_BEAM_SEARCH_DECODER_H_

#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "scorer.h"
#include "output.h"
#include "alphabet.h"

#include "flashlight/lib/text/decoder/Decoder.h"

struct DecoderState {
  int abs_time_step_;
  int space_id_;
  int blank_id_;
  size_t beam_size_;
  double cutoff_prob_;
  size_t cutoff_top_n_;
  bool start_expanding_;

  Alphabet alphabet_;
  std::shared_ptr<Scorer> ext_scorer_;
  std::vector<PathTrie*> prefixes_;
  std::unique_ptr<PathTrie> prefix_root_;
  TimestepTreeNode timestep_tree_root_{nullptr, 0};
  std::unordered_map<std::string, float> hot_words_;
  std::unordered_map<size_t, size_t> am_token_to_scorer_;
  std::unordered_map<size_t, size_t> scorer_token_to_am_;

  DecoderState() = default;
  virtual ~DecoderState() = default;

  // Disallow copying
  DecoderState(const DecoderState&) = delete;
  DecoderState& operator=(DecoderState&) = delete;

  /* Initialize CTC beam search decoder
   *
   * Parameters:
   *     alphabet: The alphabet.
   *     beam_size: The width of beam search.
   *     cutoff_prob: Cutoff probability for pruning.
   *     cutoff_top_n: Cutoff number for pruning.
   *     ext_scorer: External scorer to evaluate a prefix, which consists of
   *                 n-gram language model scoring and word insertion term.
   *                 Default null, decoding the input sample without scorer.
   * Return:
   *     Zero on success, non-zero on failure.
   */
  int init(const Alphabet& alphabet,
           size_t beam_size,
           double cutoff_prob,
           size_t cutoff_top_n,
           std::shared_ptr<Scorer> ext_scorer,
           std::unordered_map<std::string, float> hot_words);

  void init_token_mapping();

  /* Send data to the decoder
   *
   * Parameters:
   *     probs: 2-D vector where each element is a vector of probabilities
   *            over alphabet of one time step.
   *     time_dim: Number of timesteps.
   *     class_dim: Number of classes (alphabet length + 1 for the CTC blank
   *                symbol).
   */
  void next(const double *probs,
            int time_dim,
            int class_dim);

  /* Get up to num_results transcriptions from current decoder state.
   *
   * Parameters:
   *     num_results: Number of beams to return.
   *
   * Return:
   *     A vector where each element is a pair of score and decoding result,
   *     in descending order.
   */
  std::vector<Output> decode(size_t num_results=1) const;

  // Get pruned emissions for each time step's beam search
  virtual std::vector<std::pair<size_t, float>> get_pruned_emissions(
      const double *prob_step,
      size_t class_dim);
};

struct CTCDecoderForWav2vec2AM : DecoderState {
  std::unordered_set<unsigned int> ignored_symbols_;

  /* Initialize decoder
   *
   * Parameters:
   *     alphabet: The alphabet.
   *     beam_size: The width of beam search.
   *     cutoff_prob: Cutoff probability for pruning.
   *     cutoff_top_n: Cutoff number for pruning.
   *     blank_id: Index of CTC blank symbol in AM output.
   *     ignored_symbols: Indices of symbols in AM output to ignore for
   *                      decoding (e.g. <s>, </s>).
   *     ext_scorer: External scorer to evaluate a prefix, which consists of
   *                 n-gram language model scoring and word insertion term.
   *                 Default null, decoding the input sample without scorer.
   * Return:
   *     Zero on success, non-zero on failure.
   */
  int init(const Alphabet& alphabet,
           size_t beam_size,
           double cutoff_prob,
           size_t cutoff_top_n,
           int blank_id,
           const std::vector<unsigned int>& ignored_symbols,
           std::shared_ptr<Scorer> ext_scorer,
           std::unordered_map<std::string, float> hot_words);

  void init_token_mapping();

  // Get pruned emissions for each time step's beam search
  std::vector<std::pair<size_t, float>> get_pruned_emissions(
      const double *prob_step,
      size_t class_dim) override;
};

class FlashlightDecoderState
{
public:
  FlashlightDecoderState() = default;
  ~FlashlightDecoderState() = default;

  // Disallow copying
  FlashlightDecoderState(const FlashlightDecoderState&) = delete;
  FlashlightDecoderState& operator=(FlashlightDecoderState&) = delete;

  enum LMTokenType {
    Single     // LM units == AM units (character/byte LM)
    ,Aggregate // LM units != AM units (word LM)
  };

  enum DecoderType {
    LexiconBased
    ,LexiconFree
  };

  enum CriterionType {
    ASG = 0
    ,CTC = 1
    ,S2S = 2
  };

  /* Initialize beam search decoder
   *
   * Parameters:
   *     alphabet: The alphabet.
   *     beam_size: The width of beam search.
   *     beam_threshold: Pruning threshold; hypotheses scoring further than
   *                     this below the best hypothesis are discarded.
   *     cutoff_top_n: Cutoff number for pruning.
   *     ext_scorer: External scorer to evaluate a prefix, which consists of
   *                 n-gram language model scoring and word insertion term.
   *                 Default null, decoding the input sample without scorer.
   *     token_type: Whether the LM operates on AM units or aggregates (words).
   *     lm_tokens: Dictionary of LM tokens.
   *     decoder_type: Lexicon-based or lexicon-free decoding.
   *     silence_score: Score for the silence token.
   *     merge_with_log_add: Merge duplicate hypotheses with logadd, not max.
   *     criterion_type: AM criterion (ASG, CTC, or S2S).
   *     transitions: Transition scores for the ASG criterion.
   * Return:
   *     Zero on success, non-zero on failure.
   */
  int init(const Alphabet& alphabet,
           size_t beam_size,
           double beam_threshold,
           size_t cutoff_top_n,
           std::shared_ptr<Scorer> ext_scorer,
           FlashlightDecoderState::LMTokenType token_type,
           fl::lib::text::Dictionary lm_tokens,
           FlashlightDecoderState::DecoderType decoder_type,
           double silence_score,
           bool merge_with_log_add,
           FlashlightDecoderState::CriterionType criterion_type,
           std::vector<float> transitions);

  /* Send data to the decoder
   *
   * Parameters:
   *     probs: 2-D vector where each element is a vector of probabilities
   *            over alphabet of one time step.
   *     time_dim: Number of timesteps.
   *     class_dim: Number of classes (alphabet length + 1 for the CTC blank
   *                symbol).
   */
  void next(const double *probs,
            int time_dim,
            int class_dim);

  /* Return current best hypothesis, optionally pruning hypothesis space */
  FlashlightOutput intermediate(bool prune = true);

  /* Get up to num_results transcriptions from current decoder state.
   *
   * Parameters:
   *     num_results: Number of hypotheses to return.
   *
   * Return:
   *     A vector where each element is a pair of score and decoding result,
   *     in descending order.
   */
  std::vector<FlashlightOutput> decode(size_t num_results = 1);

private:
  fl::lib::text::Dictionary lm_tokens_;
  std::unique_ptr<fl::lib::text::Decoder> decoder_impl_;
};

/* CTC Beam Search Decoder
 * Parameters:
 *     probs: 2-D vector where each element is a vector of probabilities
 *            over alphabet of one time step.
 *     time_dim: Number of timesteps.
 *     class_dim: Alphabet length (plus 1 for the CTC blank symbol).
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     cutoff_prob: Cutoff probability for pruning.
 *     cutoff_top_n: Cutoff number for pruning.
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     hot_words: A map of hot-words and their corresponding boosts
 *                The hot-word is a string and the boost is a float.
 *     num_results: Number of beams to return.
 * Return:
 *     A vector where each element is a pair of score and decoding result,
 *     in descending order.
 */
std::vector<Output> ctc_beam_search_decoder(
    const double* probs,
    int time_dim,
    int class_dim,
    const Alphabet &alphabet,
    size_t beam_size,
    double cutoff_prob,
    size_t cutoff_top_n,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results=1);

/* CTC Beam Search Decoder
 * Parameters:
 *     probs: 2-D vector where each element is a vector of probabilities
 *            over alphabet of one time step.
 *     time_dim: Number of timesteps.
 *     class_dim: Alphabet length (plus 1 for the CTC blank symbol).
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     cutoff_prob: Cutoff probability for pruning.
 *     cutoff_top_n: Cutoff number for pruning.
 *     blank_id: Index of CTC blank symbol in AM output.
 *     ignored_symbols: Indices of symbols in AM output to ignore for
 *                      decoding (e.g. <s>, </s>).
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     hot_words: A map of hot-words and their corresponding boosts
 *                The hot-word is a string and the boost is a float.
 *     num_results: Number of beams to return.
 * Return:
 *     A vector where each element is a pair of score and decoding result,
 *     in descending order.
 */
std::vector<Output> ctc_beam_search_decoder_for_wav2vec2am(
    const double *probs,
    int time_dim,
    int class_dim,
    const Alphabet &alphabet,
    size_t beam_size,
    double cutoff_prob,
    size_t cutoff_top_n,
    int blank_id,
    const std::vector<unsigned int>& ignored_symbols,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results);

/* CTC Beam Search Decoder for batch data
 * Parameters:
 *     probs: 3-D vector where each element is a 2-D vector that can be used
 *            by ctc_beam_search_decoder().
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     num_processes: Number of threads for beam search.
 *     cutoff_prob: Cutoff probability for pruning.
 *     cutoff_top_n: Cutoff number for pruning.
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     hot_words: A map of hot-words and their corresponding boosts
 *                The hot-word is a string and the boost is a float.
 *     num_results: Number of beams to return.
 * Return:
 *     A 2-D vector where each element is a vector of beam search decoding
 *     result for one audio sample.
 */
std::vector<std::vector<Output>> ctc_beam_search_decoder_batch(
    const double* probs,
    int batch_size,
    int time_dim,
    int class_dim,
    const int* seq_lengths,
    int seq_lengths_size,
    const Alphabet &alphabet,
    size_t beam_size,
    size_t num_processes,
    double cutoff_prob,
    size_t cutoff_top_n,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results=1);

/* CTC Beam Search Decoder for batch data
 * Parameters:
 *     probs: 3-D vector where each element is a 2-D vector that can be used
 *            by ctc_beam_search_decoder().
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     num_processes: Number of threads for beam search.
 *     cutoff_prob: Cutoff probability for pruning.
 *     cutoff_top_n: Cutoff number for pruning.
 *     blank_id: Index of CTC blank symbol in AM output.
 *     ignored_symbols: Indices of symbols in AM output to ignore for
 *                      decoding (e.g. <s>, </s>).
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     hot_words: A map of hot-words and their corresponding boosts
 *                The hot-word is a string and the boost is a float.
 *     num_results: Number of beams to return.
 * Return:
 *     A 2-D vector where each element is a vector of beam search decoding
 *     result for one audio sample.
 */
std::vector<std::vector<Output>> ctc_beam_search_decoder_for_wav2vec2am_batch(
    const double* probs,
    int batch_size,
    int time_dim,
    int class_dim,
    const int* seq_lengths,
    int seq_lengths_size,
    const Alphabet &alphabet,
    size_t beam_size,
    size_t num_processes,
    double cutoff_prob,
    size_t cutoff_top_n,
    int blank_id,
    const std::vector<unsigned int>& ignored_symbols,
    std::shared_ptr<Scorer> ext_scorer,
    std::unordered_map<std::string, float> hot_words,
    size_t num_results=1);

/* Flashlight Beam Search Decoder
 * Parameters:
 *     probs: 2-D vector where each element is a vector of probabilities
 *            over alphabet of one time step.
 *     time_dim: Number of timesteps.
 *     class_dim: Alphabet length (plus 1 for the CTC blank symbol).
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     beam_threshold: Pruning threshold; hypotheses scoring further than
 *                     this below the best hypothesis are discarded.
 *     cutoff_top_n: Cutoff number for pruning.
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     token_type: Whether the LM operates on AM units or aggregates (words).
 *     lm_tokens: LM token strings.
 *     decoder_type: Lexicon-based or lexicon-free decoding.
 *     silence_score: Score for the silence token.
 *     merge_with_log_add: Merge duplicate hypotheses with logadd, not max.
 *     criterion_type: AM criterion (ASG, CTC, or S2S).
 *     transitions: Transition scores for the ASG criterion.
 *     num_results: Number of beams to return.
 * Return:
 *     A vector where each element is a pair of score and decoding result,
 *     in descending order.
 */
std::vector<FlashlightOutput> flashlight_beam_search_decoder(
    const double* probs,
    int time_dim,
    int class_dim,
    const Alphabet& alphabet,
    size_t beam_size,
    double beam_threshold,
    size_t cutoff_top_n,
    std::shared_ptr<Scorer> ext_scorer,
    FlashlightDecoderState::LMTokenType token_type,
    const std::vector<std::string>& lm_tokens,
    FlashlightDecoderState::DecoderType decoder_type,
    double silence_score,
    bool merge_with_log_add,
    FlashlightDecoderState::CriterionType criterion_type,
    std::vector<float> transitions,
    size_t num_results);

/* Flashlight Beam Search Decoder for batch data
 * Parameters:
 *     probs: 3-D vector where each element is a 2-D vector that can be used
 *            by flashlight_beam_search_decoder().
 *     alphabet: The alphabet.
 *     beam_size: The width of beam search.
 *     num_processes: Number of threads for beam search.
 *     beam_threshold: Pruning threshold; hypotheses scoring further than
 *                     this below the best hypothesis are discarded.
 *     cutoff_top_n: Cutoff number for pruning.
 *     ext_scorer: External scorer to evaluate a prefix, which consists of
 *                 n-gram language model scoring and word insertion term.
 *                 Default null, decoding the input sample without scorer.
 *     num_results: Number of beams to return.
 * Return:
 *     A 2-D vector where each element is a vector of beam search decoding
 *     result for one audio sample.
*/ std::vector<std::vector<FlashlightOutput>> flashlight_beam_search_decoder_batch( const double* probs, int batch_size, int time_dim, int class_dim, const int* seq_lengths, int seq_lengths_size, const Alphabet& alphabet, size_t beam_size, double beam_threshold, size_t cutoff_top_n, std::shared_ptr<Scorer> ext_scorer, FlashlightDecoderState::LMTokenType token_type, const std::vector<std::string>& lm_tokens, FlashlightDecoderState::DecoderType decoder_type, double silence_score, bool merge_with_log_add, FlashlightDecoderState::CriterionType criterion_type, std::vector<float> transitions, size_t num_results, size_t num_processes); #endif // CTC_BEAM_SEARCH_DECODER_H_
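// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of this header): decoding a single
// utterance without an external scorer. The probability buffer and its
// dimensions are placeholders, and the member names used here (`tokens` on
// Output, `Decode` on Alphabet) are assumptions about output.h/alphabet.h.
#include <string>
#include <vector>
#include "ctc_beam_search_decoder.h"

std::string best_transcript(const Alphabet& alphabet,
                            const std::vector<double>& probs,  // time_dim * class_dim, row-major
                            int time_dim,
                            int class_dim) {
  std::vector<Output> results = ctc_beam_search_decoder(
      probs.data(), time_dim, class_dim, alphabet,
      /*beam_size=*/16, /*cutoff_prob=*/1.0, /*cutoff_top_n=*/40,
      /*ext_scorer=*/nullptr, /*hot_words=*/{}, /*num_results=*/1);
  // Beams come back in descending score order; map the best beam's token
  // indices back to text through the alphabet.
  return alphabet.Decode(results[0].tokens);
}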
0
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text
coqui_public_repos/STT/native_client/ctcdecode/third_party/flashlight/flashlight/lib/text/decoder/LexiconSeq2SeqDecoder.h
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include <functional>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

#include "flashlight/lib/text/decoder/Decoder.h"
#include "flashlight/lib/text/decoder/Trie.h"
#include "flashlight/lib/text/decoder/lm/LM.h"

namespace fl {
namespace lib {
namespace text {

using AMStatePtr = std::shared_ptr<void>;
using AMUpdateFunc = std::function<
    std::pair<std::vector<std::vector<float>>, std::vector<AMStatePtr>>(
        const float*,
        const int,
        const int,
        const std::vector<int>&,
        const std::vector<AMStatePtr>&,
        int&)>;

struct LexiconSeq2SeqDecoderOptions {
  int beamSize; // Maximum number of hypotheses we hold after each step
  int beamSizeToken; // Maximum number of tokens we consider at each step
  double beamThreshold; // Threshold to prune hypotheses
  double lmWeight; // Weight of the language model
  double wordScore; // Word insertion score
  double eosScore; // Score for inserting an EOS
  bool logAdd; // Whether or not to use logadd when merging hypotheses
};

/**
 * LexiconSeq2SeqDecoderState stores information for each hypothesis in the
 * beam.
 */
struct LexiconSeq2SeqDecoderState {
  double score; // Accumulated total score so far
  LMStatePtr lmState; // Language model state
  const TrieNode* lex;
  const LexiconSeq2SeqDecoderState* parent; // Parent hypothesis
  int token; // Label of token
  int word;
  AMStatePtr amState; // Acoustic model state
  double amScore; // Accumulated AM score so far
  double lmScore; // Accumulated LM score so far

  LexiconSeq2SeqDecoderState(
      const double score,
      const LMStatePtr& lmState,
      const TrieNode* lex,
      const LexiconSeq2SeqDecoderState* parent,
      const int token,
      const int word,
      const AMStatePtr& amState,
      const double amScore = 0,
      const double lmScore = 0)
      : score(score),
        lmState(lmState),
        lex(lex),
        parent(parent),
        token(token),
        word(word),
        amState(amState),
        amScore(amScore),
        lmScore(lmScore) {}

  LexiconSeq2SeqDecoderState()
      : score(0),
        lmState(nullptr),
        lex(nullptr),
        parent(nullptr),
        token(-1),
        word(-1),
        amState(nullptr),
        amScore(0.),
        lmScore(0.) {}

  int compareNoScoreStates(const LexiconSeq2SeqDecoderState* node) const {
    int lmCmp = lmState->compare(node->lmState);
    if (lmCmp != 0) {
      return lmCmp > 0 ? 1 : -1;
    } else if (lex != node->lex) {
      return lex > node->lex ? 1 : -1;
    } else if (token != node->token) {
      return token > node->token ? 1 : -1;
    }
    return 0;
  }

  int getWord() const {
    return word;
  }
};

/**
 * Decoder implements a beam search decoder that finds the token transcription
 * W maximizing:
 *
 * AM(W) + lmWeight_ * log(P_{lm}(W)) + eosScore_ * |W_last == EOS|
 *
 * where P_{lm}(W) is the language model score. The transcription W is
 * constrained by a lexicon. The language model may operate at word-level
 * (isLmToken=false) or token-level (isLmToken=true).
 *
 * TODO: Doesn't support online decoding now.
* */ class LexiconSeq2SeqDecoder : public Decoder { public: LexiconSeq2SeqDecoder( LexiconSeq2SeqDecoderOptions opt, const TriePtr& lexicon, const LMPtr& lm, const int eos, AMUpdateFunc amUpdateFunc, const int maxOutputLength, const bool isLmToken) : opt_(std::move(opt)), lm_(lm), lexicon_(lexicon), eos_(eos), amUpdateFunc_(amUpdateFunc), maxOutputLength_(maxOutputLength), isLmToken_(isLmToken) {} void decodeStep(const float* emissions, int T, int N) override; void prune(int lookBack = 0) override; int nDecodedFramesInBuffer() const override; DecodeResult getBestHypothesis(int lookBack = 0) const override; std::vector<DecodeResult> getAllFinalHypothesis() const override; protected: LexiconSeq2SeqDecoderOptions opt_; LMPtr lm_; TriePtr lexicon_; int eos_; AMUpdateFunc amUpdateFunc_; std::vector<int> rawY_; std::vector<AMStatePtr> rawPrevStates_; int maxOutputLength_; bool isLmToken_; std::vector<LexiconSeq2SeqDecoderState> candidates_; std::vector<LexiconSeq2SeqDecoderState*> candidatePtrs_; double candidatesBestScore_; std::unordered_map<int, std::vector<LexiconSeq2SeqDecoderState>> hyp_; }; } // namespace text } // namespace lib } // namespace fl
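// ---------------------------------------------------------------------------
// Configuration sketch (illustrative, not part of this header): populating
// the options struct defined above. The numeric values are hypothetical
// starting points, not tuned recommendations.
#include "flashlight/lib/text/decoder/LexiconSeq2SeqDecoder.h"

fl::lib::text::LexiconSeq2SeqDecoderOptions MakeOptions() {
  fl::lib::text::LexiconSeq2SeqDecoderOptions opt;
  opt.beamSize = 50;         // hypotheses kept after each step
  opt.beamSizeToken = 10;    // tokens expanded per step
  opt.beamThreshold = 25.0;  // prune hypotheses scoring below best - threshold
  opt.lmWeight = 1.5;        // scales log P_{lm}(W) in the objective above
  opt.wordScore = 0.5;       // word insertion bonus
  opt.eosScore = 0.0;        // end-of-sentence bonus/penalty
  opt.logAdd = false;        // merge duplicate hypotheses with max, not logadd
  return opt;
}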
0
coqui_public_repos/STT/native_client
coqui_public_repos/STT/native_client/java/Makefile
.PHONY: clean apk-clean include ../definitions.mk ARCHS := $(shell grep 'ABI_FILTERS' libstt/gradle.properties | cut -d'=' -f2 | sed -e 's/;/ /g') GRADLE ?= ./gradlew all: apk clean: apk-clean rm -rf *.java jni/stt_wrap.cpp apk-clean: $(GRADLE) clean libs-clean: rm -fr libstt/libs/*/libstt.so libstt/libs/%/libstt.so: -mkdir libstt/libs/$*/ cp ${TFDIR}/bazel-out/$*-*/bin/native_client/libstt.so libstt/libs/$*/ apk: apk-clean bindings $(patsubst %,libstt/libs/%/libstt.so,$(ARCHS)) $(GRADLE) build maven-bundle: apk $(GRADLE) uploadArchives $(GRADLE) zipMavenArtifacts bindings: clean ds-swig $(DS_SWIG_ENV) swig -c++ -java -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt/ -o jni/stt_wrap.cpp jni/stt.i
0
coqui_public_repos/inference-engine/third_party/cereal/include/cereal
coqui_public_repos/inference-engine/third_party/cereal/include/cereal/types/utility.hpp
/*! \file utility.hpp \brief Support for types found in \<utility\> \ingroup STLSupport */ /* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CEREAL_TYPES_UTILITY_HPP_ #define CEREAL_TYPES_UTILITY_HPP_ #include "cereal/cereal.hpp" #include <utility> namespace cereal { //! Serializing for std::pair template <class Archive, class T1, class T2> inline void CEREAL_SERIALIZE_FUNCTION_NAME( Archive & ar, std::pair<T1, T2> & pair ) { ar( CEREAL_NVP_("first", pair.first), CEREAL_NVP_("second", pair.second) ); } } // namespace cereal #endif // CEREAL_TYPES_UTILITY_HPP_
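// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of this header): round-tripping a
// std::pair through the serializer above. It assumes cereal's JSON archive
// and std::string support headers are available alongside this one.
#include <iostream>
#include <sstream>
#include <string>
#include <utility>

#include "cereal/archives/json.hpp"
#include "cereal/types/string.hpp"
#include "cereal/types/utility.hpp"

int main() {
  std::stringstream ss;
  {
    cereal::JSONOutputArchive ar(ss);
    std::pair<int, std::string> p{42, "answer"};
    // The pair serializer above writes p as an object with
    // "first" and "second" fields.
    ar(CEREAL_NVP(p));
  }  // archive flushes on destruction
  std::cout << ss.str() << std::endl;
  return 0;
}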
0
coqui_public_repos/STT
coqui_public_repos/STT/taskcluster/node-package-cpu.yml
build: template_file: node-package-opt-base.tyml dependencies: - "darwin-amd64-cpu-opt" - "linux-amd64-cpu-opt" - "linux-rpi3-cpu-opt" - "linux-arm64-cpu-opt" - "win-amd64-cpu-opt" system_setup: > ${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml scripts: build: "taskcluster/node-build.sh" package: "taskcluster/node-package.sh" workerType: "${docker.smallTask}" metadata: name: "DeepSpeech NodeJS CPU package" description: "Packaging DeepSpeech CPU for registry"
0