diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/Dockerfile.training b/Dockerfile.training
new file mode 100644
index 0000000..5a512cb
--- /dev/null
+++ b/Dockerfile.training
@@ -0,0 +1,53 @@
+# cat Dockerfile.training | docker build --build-arg=base=debian:9.8 -t daanzu/kaldi_ag_training -
+# cat Dockerfile.training | docker build --build-arg=base=nvidia/cuda:10.0-cudnn7-devel-ubuntu16.04 --build-arg=cuda=yes -t daanzu/kaldi_ag_training_gpu -
+
+ARG base
+ARG cuda=
+
+FROM $base
+LABEL maintainer="daanzu@gmail.com"
+ARG cuda
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends \
+ g++ \
+ make \
+ automake \
+ autoconf \
+ bzip2 \
+ unzip \
+ wget \
+ sox \
+ libtool \
+ git \
+ subversion \
+ python2.7 \
+ python3 \
+ zlib1g-dev \
+ ca-certificates \
+ gfortran \
+ patch \
+ ffmpeg \
+ vim && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN ln -s /usr/bin/python2.7 /usr/bin/python
+
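+# The optional cuda build-arg (see the build commands at the top) controls GPU support:
+# ${cuda:+--use-cuda} below expands to --use-cuda only when cuda is set and non-empty.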
+RUN git clone --depth 1 https://github.com/daanzu/kaldi-fork-active-grammar /opt/kaldi && \
+ cd /opt/kaldi/tools && \
+ ./extras/install_mkl.sh && \
+ make -j $(nproc) && \
+ cd /opt/kaldi/src && \
+ ./configure --shared ${cuda:+--use-cuda} && \
+ make depend -j $(nproc) && \
+ make -j $(nproc) && \
+ find /opt/kaldi -type f \( -name "*.o" -o -name "*.la" -o -name "*.a" \) -exec rm {} \; && \
+ find /opt/intel -type f -name "*.a" -exec rm {} \; && \
+ find /opt/intel -type f -regex '.*\(_mic\|_thread\|_ilp64\)\.so' -exec rm {} \; && \
+ rm -rf /opt/kaldi/.git
+
+# Note: MKL's _mc libraries are deliberately kept above, for pre-AVX CPUs
+
+WORKDIR /opt/kaldi/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..be3f7b2
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a2c44a5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,79 @@
+# Kaldi AG Training Setup
+
+[![Donate](https://img.shields.io/badge/donate-GitHub-pink.svg)](https://github.com/sponsors/daanzu)
+[![Donate](https://img.shields.io/badge/donate-Patreon-orange.svg)](https://www.patreon.com/daanzu)
+[![Donate](https://img.shields.io/badge/donate-PayPal-green.svg)](https://paypal.me/daanzu)
+
+Docker image and scripts for training finetuned or completely personal speech models. Particularly for use with [kaldi-active-grammar](https://github.com/daanzu/kaldi-active-grammar).
+
+## Usage
+
+All commands are run in the Docker container as follows. Training on the CPU should work too, just much more slowly; to do so, remove `--runtime=nvidia` and use the image `daanzu/kaldi_ag_training:2021-08-04` instead of the GPU image.
+
+```bash
+docker run -it --rm -v $(pwd):/mnt/input -w /mnt/input --user "$(id -u):$(id -g)" \
+ --runtime=nvidia daanzu/kaldi_ag_training_gpu:2021-08-04 \
+ [command and args...]
+```
+
+Example commands:
+
+```bash
+# Prepare training dataset files
+python3 convert_tsv_to_scp.py -l kaldi_model_daanzu_20200905_1ep-mediumlm-base/dict/lexicon.txt yourdata.tsv [optional output directory]
+
+# Pick only one of the following:
+# Run finetune training, with default settings
+docker run [...] bash run.finetune.sh kaldi_model_daanzu_20200905_1ep-mediumlm-base dataset
+# Run completely personal training, with default settings
+docker run [...] bash run.personal.sh kaldi_model_daanzu_20200905_1ep-mediumlm-base dataset
+
+# When training completes, export trained model
+python3 export_trained_model.py {finetune,personal} [optional output directory]
+```
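+
+For example, the wrapper and a training command combine into one full invocation (assuming the base model and `dataset/` directories are in the current directory):
+
+```bash
+docker run -it --rm -v $(pwd):/mnt/input -w /mnt/input --user "$(id -u):$(id -g)" \
+    --runtime=nvidia daanzu/kaldi_ag_training_gpu:2021-08-04 \
+    bash run.finetune.sh kaldi_model_daanzu_20200905_1ep-mediumlm-base dataset
+```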
+
+### Notes
+
+* To run either training, you must have a base model to use as a template. (For finetuning this is also the starting point of the model; for personal it is only a source of basic info.) You can use [this base model](https://github.com/daanzu/kaldi_ag_training/releases/download/v0.1.0/kaldi_model_daanzu_20200905_1ep-mediumlm-base.zip) from this project's release page. Download the zip file and extract it to the root directory of this repo, so the directory `kaldi_model_daanzu_20200905_1ep-mediumlm-base` is here.
+
+* Kaldi requires the training data metadata to be in the SCP format, which is an annoying multi-file format (see the sketch after this list). To convert the standard KaldiAG TSV format to SCP, you can run `python3 convert_tsv_to_scp.py yourdata.tsv dataset` to output SCP format in a new directory `dataset`. You can run these commands within the Docker container, or directly using your own python environment.
+ * Even better, run `python3 convert_tsv_to_scp.py -l kaldi_model_daanzu_20200905_1ep-mediumlm-base/dict/lexicon.txt yourdata.tsv dataset` to filter out utterances containing out-of-vocabulary words. OOV words are not currently well supported by these training scripts.
+
+* The audio data should be 16-bit Signed Integer PCM 1-channel 16kHz WAV files (a `sox` one-liner for converting follows this list). Note that the data needs to be accessible within the Docker container, so it can't be behind a symlink that points outside this repo directory, which is shared with the Docker container.
+
+* There are some directory names you should avoid using in this repo directory, because the scripts will create & use them during training. Avoid: `conf`, `data`, `exp`, `extractor`, `mfcc`, `steps`, `tree_sp`, `utils`.
+
+* Training may use a lot of storage. You may want to locate this directory somewhere with ample room available.
+
+* The training commands (`run.*.sh`) accept many optional parameters (more documentation to come); for example:
+
+ * `--stage n` : Skip to given stage
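+
+For reference, here is roughly what `convert_tsv_to_scp.py` writes into the output directory, as mentioned in the notes above: the utterance ID is the WAV file's basename, and each utterance is treated as its own speaker (`note_001` is a hypothetical recording):
+
+```
+# dataset/wav.scp : utterance ID and audio path
+note_001 audio_data/note_001.wav
+# dataset/text : utterance ID and transcript
+note_001 this is a test utterance
+# dataset/utt2spk : utterance ID and speaker ID (identical here)
+note_001 note_001
+```
+
+And one possible `sox` invocation (sox is installed in the Docker image) for converting audio into the required format; `input.flac` is a placeholder name:
+
+```bash
+sox input.flac -r 16000 -b 16 -c 1 -e signed-integer output.wav
+```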
+
+## License
+
+This project is licensed under the GNU Affero General Public License v3 (AGPL-3.0-or-later). See the [LICENSE file](LICENSE) for details. If this license is problematic for you, please contact me.
diff --git a/cmd.sh b/cmd.sh
new file mode 100644
index 0000000..41cbd6c
--- /dev/null
+++ b/cmd.sh
@@ -0,0 +1,4 @@
+export train_cmd="utils/run.pl"
+export decode_cmd="utils/run.pl"
+export cuda_cmd="utils/run.pl"
+# export cuda_cmd="utils/run.pl -l gpu=1"
diff --git a/convert_tsv_to_scp.py b/convert_tsv_to_scp.py
new file mode 100644
index 0000000..c7adb1e
--- /dev/null
+++ b/convert_tsv_to_scp.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+
+import argparse, os
+
+parser = argparse.ArgumentParser(description='Convert a TSV file to Kaldi SCP files.')
+parser.add_argument('filename', help='The TSV file to convert.')
+parser.add_argument('output_dir', nargs='?', default='dataset', help='The directory to save the output files.')
+parser.add_argument('-l', '--lexicon_file', help='The name of the lexicon file, for filtering out out-of-vocabulary utterances.')
+args = parser.parse_args()
+
+if not os.path.exists(args.filename):
+    raise Exception('File does not exist: %s' % args.filename)
+os.makedirs(args.output_dir, exist_ok=True)
+
+lexicon = set()
+if args.lexicon_file:
+    with open(args.lexicon_file, 'r') as f:
+        for line in f:
+            word = line.strip().split()[0]  # First field is the word; the rest is its pronunciation
+            lexicon.add(word)
+
+utt2spk_dict, wav_dict, text_dict = {}, {}, {}
+with open(args.filename, 'r') as f:
+    for line in f:
+        fields = line.strip().split('\t')
+        text = fields[4]
+        wav_path = fields[0]
+        utt_id = os.path.splitext(os.path.basename(wav_path))[0]
+        if lexicon and any(word not in lexicon for word in text.split()):
+            continue
+        utt2spk_dict[utt_id] = utt_id
+        wav_dict[utt_id] = wav_path
+        text_dict[utt_id] = text
+
+with open(os.path.join(args.output_dir, 'utt2spk'), 'w') as f:
+    for (key, val) in utt2spk_dict.items():
+        f.write('%s %s\n' % (key, val))
+with open(os.path.join(args.output_dir, 'wav.scp'), 'w') as f:
+    for (key, val) in wav_dict.items():
+        f.write('%s %s\n' % (key, val))
+with open(os.path.join(args.output_dir, 'text'), 'w') as f:
+    for (key, val) in text_dict.items():
+        f.write('%s %s\n' % (key, val))
+
+print(f"Wrote training dataset to {args.output_dir}")
diff --git a/export_trained_model.py b/export_trained_model.py
new file mode 100644
index 0000000..186a2d9
--- /dev/null
+++ b/export_trained_model.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+
+import argparse, os, shutil
+
+parser = argparse.ArgumentParser(description='Export trained model.')
+parser.add_argument('type', choices=('personal', 'finetune'), help='Type of trained model.')
+parser.add_argument('output_dir', nargs='?', default='exported_model', help='Directory to save the output model.')
+parser.add_argument('-b', '--base_model_dir', default='kaldi_model_daanzu_20200905_1ep-mediumlm-base', help='Directory of model to copy base files from.')
+args = parser.parse_args()
+
+if not os.path.exists(args.base_model_dir):
+    raise Exception('Base model directory does not exist.')
+if os.path.exists(args.output_dir):
+    raise Exception('Output directory already exists.')
+# copytree creates output_dir itself, so it must not already exist.
+shutil.copytree(args.base_model_dir, args.output_dir, ignore=shutil.ignore_patterns('dict', 'tree_stuff'))
+os.makedirs(os.path.join(args.output_dir, 'training'), exist_ok=True)
+
+if args.type == 'personal':
+    for name in 'final.mdl tree'.split():
+        shutil.copy2(os.path.join('exp/chain/tdnn1h_sp_online', name), args.output_dir)
+    for name in 'final.dubm final.ie final.mat global_cmvn.stats'.split():
+        shutil.copy2(os.path.join('exp/chain/tdnn1h_sp_online', 'ivector_extractor', name), os.path.join(args.output_dir, 'ivector_extractor'))
+    shutil.copy2('exp/chain/tdnn1h_sp/accuracy.report', os.path.join(args.output_dir, 'training'))
+    shutil.copy2('params.txt', os.path.join(args.output_dir, 'training'))
+
+elif args.type == 'finetune':
+    for name in 'final.mdl'.split():
+        shutil.copy2(os.path.join('exp/nnet3_chain/finetune', name), args.output_dir)
+    shutil.copy2('exp/nnet3_chain/finetune/accuracy.report', os.path.join(args.output_dir, 'training'))
+
+print(f"Wrote exported {args.type} model to {args.output_dir}")
+print("NOTE: You still must run the following in your kaldi-active-grammar python environment:")
+print("python -m kaldi_active_grammar compile_agf_dictation_graph -v -m [model_dir] G.fst")
diff --git a/path.sh b/path.sh
new file mode 100644
index 0000000..c397f99
--- /dev/null
+++ b/path.sh
@@ -0,0 +1,4 @@
+export KALDI_ROOT=/opt/kaldi
+export LD_LIBRARY_PATH="$KALDI_ROOT/tools/openfst/lib:$KALDI_ROOT/tools/openfst/lib/fst:$KALDI_ROOT/src/lib:$LD_LIBRARY_PATH"
+export PATH=$KALDI_ROOT/src/lmbin/:$KALDI_ROOT/../kaldi_lm/:$PWD/utils/:$KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin/:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/kwsbin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/chainbin:$KALDI_ROOT/src/rnnlmbin:$PWD:$PATH
+export LC_ALL=C
diff --git a/run.finetune.sh b/run.finetune.sh
new file mode 100644
index 0000000..493804d
--- /dev/null
+++ b/run.finetune.sh
@@ -0,0 +1,84 @@
+# docker run -it --rm -v $(pwd):/mnt/input -v $(pwd)/work:/mnt/work -w /mnt/work --user "$(id -u):$(id -g)" daanzu/kaldi_ag_training:2020-11-28 bash run.finetune.sh models/kaldi_model_daanzu_20200905_1ep-mediumlm data/standard2train --num-epochs 5 --train-stage -10 --stage 1
+# docker run -it --rm -v $(pwd):/mnt/input -v $(pwd)/work:/mnt/work -w /mnt/work --user "$(id -u):$(id -g)" --runtime=nvidia daanzu/kaldi_ag_training_gpu:2020-11-28 bash run.finetune.sh models/kaldi_model_daanzu_20200905_1ep-mediumlm data/standard2train --num-epochs 5 --train-stage -10 --stage 1
+
+set -euxo pipefail
+
+nice_cmd="nice ionice -c idle"
+
+[[ $# -ge 2 ]] || exit 1
+
+model=/mnt/input/$1; shift
+dataset=/mnt/input/$1; shift
+
+[[ -d $model ]] || exit 1
+[[ -d $dataset ]] || exit 1
+
+echo "base_model=${model#/mnt/input/}" >> params.txt
+echo "train_dataset=${dataset#/mnt/input/}" >> params.txt
+
+cat <<\EOF > cmd.sh
+export train_cmd="utils/run.pl"
+export decode_cmd="utils/run.pl"
+export cuda_cmd="utils/run.pl"
+# export cuda_cmd="utils/run.pl -l gpu=1"
+EOF
+cat <<\EOF > path.sh
+export KALDI_ROOT=/opt/kaldi
+export LD_LIBRARY_PATH="$KALDI_ROOT/tools/openfst/lib:$KALDI_ROOT/tools/openfst/lib/fst:$KALDI_ROOT/src/lib:$LD_LIBRARY_PATH"
+export PATH=$KALDI_ROOT/src/lmbin/:$KALDI_ROOT/../kaldi_lm/:$PWD/utils/:$KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin/:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/kwsbin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/chainbin:$KALDI_ROOT/src/rnnlmbin:$PWD:$PATH
+export LC_ALL=C
+EOF
+ln -sf /opt/kaldi/egs/wsj/s5/steps
+ln -sf /opt/kaldi/egs/wsj/s5/utils
+
+mkdir -p conf data/{lang/phones,finetune} exp extractor
+cp $model/conf/{mfcc,mfcc_hires}.conf conf/
+cp $model/conf/online_cmvn.conf conf/ # Only needed if/for finetune_ivector_extractor
+cp $model/conf/online_cmvn.conf extractor/
+# cp $model/ivector_extractor/final.{ie,dubm,mat} extractor/ # Careful not to overwrite finetuned ivector_extractor!
+cp $model/ivector_extractor/global_cmvn.stats extractor/
+cp $model/conf/online_cmvn_iextractor extractor/ 2>/dev/null || true
+cp $model/conf/splice.conf extractor/splice_opts
+echo "18" > data/lang/oov.int
+cp $model/{words,phones}.txt data/lang/
+cp $model/disambig.int data/lang/phones/
+cp $model/wdisambig_{words,phones}.int data/lang/phones/ # Only needed if/for mkgraph.sh
+echo "3" > $model/frame_subsampling_factor
+
+echo "1:2:3:4:5:6:7:8:9:10:11:12:13:14:15" > data/lang/phones/context_indep.csl
+echo "1:2:3:4:5:6:7:8:9:10:11:12:13:14:15" > data/lang/phones/silence.csl
+
+. path.sh
+
+# ln -sfT $model/tree_sp tree_sp
+rm tree_sp 2> /dev/null || true
+mkdir -p tree_sp
+cp $model/phones.txt tree_sp/
+mkdir -p exp/nnet3_chain/finetune/
+cp -r $model/dict data/ # Only needed if/for finetune_tree
+# cp $model/tree_stuff/topo data/lang/ # Only needed if/for finetune_tree
+# cp $model/tree_stuff/sets.int data/lang/phones/ # Only needed if/for finetune_tree
+
+# Skip train.py::create_phone_lm()
+touch tree_sp/ali.1.gz tree_sp/tree tree_sp/final.mdl # Fake empty, to pacify the training script later
+
+# Skip train.py::create_denominator_fst()
+copy-transition-model $model/final.mdl exp/nnet3_chain/finetune/0.trans_mdl 2> /dev/null
+cp $model/tree $model/tree_stuff/{den,normalization}.fst exp/nnet3_chain/finetune/
+
+perl -ane '@A=split(" ",$_); $w = shift @A; $p = shift @A; @A>0||die;
+ if(@A==1) { print "$w $p $A[0]_S\n"; } else { print "$w $p $A[0]_B ";
+ for($n=1;$n<@A-1;$n++) { print "$A[$n]_I "; } print "$A[$n]_E\n"; } ' \
+ < $model/lexiconp.txt > data/lang/lexiconp_pdp.txt || exit 1;
+utils/lang/make_lexicon_fst.py --sil-prob=0.5 --sil-phone=SIL data/lang/lexiconp_pdp.txt | \
+ fstcompile --isymbols=$model/phones.txt --osymbols=$model/words.txt --keep_isymbols=false --keep_osymbols=false | \
+ fstarcsort --sort_type=olabel > data/lang/L.fst || exit 1
+
+cp -r $dataset/{text,wav.scp,utt2spk} data/finetune
+# ln -sfT /mnt/input/audio_data audio_data
+
+# utils/fix_data_dir.sh data/finetune
+$nice_cmd bash run_finetune_tdnn_1a_daanzu.sh --src-dir $model --extractor-dir extractor --tree-dir tree_sp --nj $(nproc) "$@"
+
+# > cp -r work.test.per/data/lang/phones/* work.test.fin/data/lang/phones/
+# > cp -r work.test.per/data/lang_chain/topo work.test.fin/data/lang/
diff --git a/run.personal.sh b/run.personal.sh
new file mode 100644
index 0000000..9cfcba9
--- /dev/null
+++ b/run.personal.sh
@@ -0,0 +1,68 @@
+# docker run -it --rm -v $(pwd):/mnt/input -v $(pwd)/work:/mnt/work -w /mnt/work --user "$(id -u):$(id -g)" daanzu/kaldi_ag_training:2020-11-28 bash run.personal.sh models/kaldi_model_daanzu_20200905_1ep-mediumlm data/standard2train --num-epochs 5 --stage -10
+# docker run -it --rm -v $(pwd):/mnt/input -v $(pwd)/work:/mnt/work -w /mnt/work --user "$(id -u):$(id -g)" --runtime=nvidia daanzu/kaldi_ag_training_gpu:2020-11-28 bash run.personal.sh models/kaldi_model_daanzu_20200905_1ep-mediumlm data/standard2train --num-epochs 5 --stage -10
+
+set -euxo pipefail
+
+nice_cmd="nice ionice -c idle"
+stage=-10
+gmm_stage=0 # always stage+10: the GMM script's stage numbering is offset by 10 from this script's --stage
+
+# Scan through arguments, checking for stage argument, which if included we need to use to set the gmm_stage
+POSITIONAL=()
+while [[ $# -gt 0 ]]
+do
+key="$1"
+case $key in
+ --stage)
+ stage="$2"
+ gmm_stage=$((stage+10))
+ POSITIONAL+=("$1" "$2") # save it in an array for later
+ shift # past argument
+ shift # past value
+ ;;
+ *) # unknown option
+ POSITIONAL+=("$1") # save it in an array for later
+ shift # past argument
+ ;;
+esac
+done
+set -- "${POSITIONAL[@]}" # restore positional parameters
+
+[[ $# -ge 2 ]] || exit 1
+
+model=/mnt/input/$1; shift
+dataset=/mnt/input/$1; shift
+
+[[ -d $model ]] || exit 1
+[[ -d $dataset ]] || exit 1
+
+echo "base_model=${model#/mnt/input/}" >> params.txt
+echo "train_dataset=${dataset#/mnt/input/}" >> params.txt
+
+cat <<\EOF > cmd.sh
+export train_cmd="utils/run.pl"
+export decode_cmd="utils/run.pl"
+export cuda_cmd="utils/run.pl"
+# export cuda_cmd="utils/run.pl -l gpu=1"
+EOF
+cat <<\EOF > path.sh
+export KALDI_ROOT=/opt/kaldi
+export LD_LIBRARY_PATH="$KALDI_ROOT/tools/openfst/lib:$KALDI_ROOT/tools/openfst/lib/fst:$KALDI_ROOT/src/lib:$LD_LIBRARY_PATH"
+export PATH=$KALDI_ROOT/src/lmbin/:$KALDI_ROOT/../kaldi_lm/:$PWD/utils/:$KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin/:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/kwsbin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/chainbin:$KALDI_ROOT/src/rnnlmbin:$PWD:$PATH
+export LC_ALL=C
+EOF
+ln -sf /opt/kaldi/egs/wsj/s5/steps
+ln -sf /opt/kaldi/egs/wsj/s5/utils
+
+mkdir -p data/train data/dict conf exp
+cp $model/conf/{mfcc,mfcc_hires,online_cmvn}.conf conf
+cp $model/dict/{extra_questions.txt,lexiconp.txt,lexicon.txt,nonsilence_phones.txt,optional_silence.txt,silence_phones.txt} data/dict
+
+[[ $stage -gt -10 ]] || rm -rf data/train/*
+cp $dataset/{text,wav.scp,utt2spk} data/train
+# ln -sfT /mnt/input/audio_data audio_data
+# ln -sfT /mnt/input/audio_data/daanzu wav
+
+# utils/fix_data_dir.sh data/train
+$nice_cmd bash run_personal_gmm.sh --nj $(nproc) --stage $gmm_stage
+$nice_cmd bash run_personal_chain_tdnn_1h.sh --nj $(nproc) "$@"
diff --git a/run_finetune_tdnn_1a_daanzu.sh b/run_finetune_tdnn_1a_daanzu.sh
new file mode 100644
index 0000000..3918e5f
--- /dev/null
+++ b/run_finetune_tdnn_1a_daanzu.sh
@@ -0,0 +1,305 @@
+#!/bin/bash
+
+# Adapted from egs/aishell2/s5/local/nnet3/tuning/finetune_tdnn_1a.sh commit 42a673a5e7f201736dfbf2116e8eaa94745e5a5f
+# Also see:
+# egs/rm/s5/local/chain/tuning/run_tdnn_wsj_rm_1b.sh
+# egs/rm/s5/local/chain/tuning/run_tdnn_wsj_rm_1c.sh
+
+# This script uses weight transfer as a transfer learning method to transfer already trained neural net model to a finetune dataset.
+
+# Usage: run_finetune_tdnn_1a_daanzu.sh --src-dir export/tdnn_f.1ep --num-epochs 5 --stage 1 --train-stage -10
+
+# Required Inputs:
+# data/finetune (text wav.scp utt2spk)
+# src_dir
+# tree_dir (tree final.mdl ali.*.gz phones.txt)
+# lang_dir (oov.int L.fst words.txt phones.txt phones/disambig.int)
+# conf_dir (mfcc.conf mfcc_hires.conf)
+# extractor_dir (final.ie final.dubm final.mat global_cmvn.stats splice_opts online_cmvn.conf online_cmvn_iextractor?)
+# Writes To:
+# data/finetune, data/finetune_hires, data/finetune_sp, data/finetune_sp_hires,
+# exp/make_mfcc_chain/finetune, exp/make_mfcc_chain/finetune_sp_hires, exp/make_mfcc_chain/finetune_hires,
+# exp/nnet3_chain/ivectors_finetune_hires, exp/finetune_lats, exp/nnet3_chain/finetune
+
+set -e
+
+data_set=finetune
+data_dir=data/${data_set}
+conf_dir=conf
+lang_dir=data/lang # FIXME: lang_chain?
+extractor_dir=exp/nnet3_chain/extractor
+# ali_dir=exp/${data_set}_ali
+# lat_dir=exp/${data_set}_lats
+src_dir=exp/nnet3_chain/tdnn_f
+tree_dir=exp/nnet3_chain/tree_sp
+# dir=${src_dir}_${data_set}
+dir=exp/nnet3_chain/${data_set}
+
+respect_speaker_info=false
+finetune_ivector_extractor=false
+finetune_phonelm=false
+
+num_gpus=1
+num_epochs=5
+# initial_lrate=0.0005
+# final_lrate=0.00002
+initial_lrate=.00025
+final_lrate=.000025
+minibatch_size=128,64
+primary_lr_factor=0.25 # learning-rate factor for all except last layer in transferred source model (last layer is 1.0)
+
+xent_regularize=0.1
+train_stage=-4 # Normally default -10, but here -4 to skip phone_LM and den.fst generation training stages.
+get_egs_stage=-10
+common_egs_dir= # you can set this to use previously dumped egs.
+egs_opts="--num-utts-subset 3000 --max-jobs-run 4 --max-shuffle-jobs-run 10" # --num-utts-subset 3000 --max-jobs-run 4 --max-shuffle-jobs-run 10
+dropout_schedule='0,0@0.20,0.5@0.50,0'
+frames_per_eg=150,110,100
+chain_left_tolerance=1
+chain_right_tolerance=1
+
+stage=1
+nj=8
+
+echo "$0 $@" # Print the command line for logging
+. ./path.sh
+. ./cmd.sh
+. ./utils/parse_options.sh
+
+if [ "$num_gpus" -eq 0 ]; then
+ gpu_opt="no"
+ num_gpus=1
+else
+ gpu_opt="wait"
+fi
+
+function write_params() {
+ for v in $*; do
+ echo "${v}=${!v}" >> params.txt
+ done
+}
+
+function log_stage() {
+ echo
+ echo "# Stage $1"
+ [ -n "$2" ] && echo "# $2" || true
+ echo "# $(date)"
+ echo
+}
+
+function compute_features() {
+ # compute_features [input_data_dir_affix]
+ rm -f ${data_dir}${2}/{cmvn.scp,feats.scp}
+ steps/make_mfcc.sh \
+ --cmd "$train_cmd" --nj $nj --mfcc-config ${1} \
+ ${data_dir}${2} exp/make_mfcc_chain/${data_set}${2}.log exp/make_mfcc_chain
+ steps/compute_cmvn_stats.sh ${data_dir}${2} exp/make_mfcc_chain/${data_set}${2}.log exp/make_mfcc_chain || exit 1;
+ utils/fix_data_dir.sh ${data_dir}${2} || exit 1;
+}
+
+if [ $stage -le 1 ]; then
+ log_stage 1 "Compute features (MFCC & CMVN stats) of the new dataset, including perturbing the data"
+ # (Approximately 0.66min single-core compute time per core per 1hr audio data)
+ utils/fix_data_dir.sh ${data_dir} || exit 1;
+
+ # Standard lores training data
+ compute_features $conf_dir/mfcc.conf
+
+ rm -rf ${data_dir}_sp
+ utils/data/perturb_data_dir_speed_3way.sh ${data_dir} ${data_dir}_sp || exit 1;
+ compute_features $conf_dir/mfcc.conf _sp
+
+ rm -rf ${data_dir}_sp_hires
+ utils/copy_data_dir.sh ${data_dir}_sp ${data_dir}_sp_hires
+ utils/data/perturb_data_dir_volume.sh ${data_dir}_sp_hires || exit 1;
+ compute_features $conf_dir/mfcc_hires.conf _sp_hires
+
+ rm -rf ${data_dir}_sp_novp_hires
+ utils/copy_data_dir.sh ${data_dir}_sp ${data_dir}_sp_novp_hires
+ compute_features $conf_dir/mfcc_hires.conf _sp_novp_hires
+
+ rm -rf ${data_dir}_vp_hires
+ utils/copy_data_dir.sh ${data_dir} ${data_dir}_vp_hires
+ utils/data/perturb_data_dir_volume.sh ${data_dir}_vp_hires || exit 1;
+ compute_features $conf_dir/mfcc_hires.conf _vp_hires
+
+ rm -rf ${data_dir}_hires
+ utils/copy_data_dir.sh ${data_dir} ${data_dir}_hires
+ compute_features $conf_dir/mfcc_hires.conf _hires
+fi
+
+train_affix=_hires
+# train_affix=_sp_novp_hires
+train_data_dir=${data_dir}${train_affix}
+train_ivector_dir=exp/nnet3_chain/ivectors_${data_set}${train_affix}
+lat_dir=exp/nnet3_chain/lats_${data_set}${train_affix}
+lores_train_data_dir=${data_dir}_sp
+# extractor_dir=exp/nnet3_chain/extractor_${data_set}${train_affix}
+
+if $finetune_ivector_extractor; then
+ train_set=${data_set}
+
+ if [ $stage -le 2 ]; then
+ log_stage 2 "Finetune ivectors: Train diagonal UBM"
+
+ echo "$0: computing a subset of data to train the diagonal UBM."
+ # We'll use about a quarter of the data.
+ temp_data_root=exp/nnet3_chain/diag_ubm
+ mkdir -p $temp_data_root
+
+    num_utts_total=$(wc -l <${train_data_dir}/utt2spk)
+    num_utts=$[$num_utts_total/4]
+    utils/data/subset_data_dir.sh ${train_data_dir} \
+      $num_utts ${temp_data_root}/${train_set}_subset
+  fi
+fi
+
+if ! cuda-compiled; then
+  cat <<EOF && exit 1
+This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
+If you want to use GPUs (and have them), go to src/, and configure and make on a machine where "nvcc" is installed.
+EOF
+fi
+
+if [ $stage -le 10 ]; then
+  echo "$0: creating lang directory $lang with chain-type topology"
+  if [ ! -d $lang ]; then
+    cp -r data/lang $lang
+    silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
+    nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
+    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
+  fi
+fi
+
+if [ $stage -le 11 ]; then
+ # Get the alignments as lattices (gives the chain training more freedom).
+ # use the same num-jobs as the alignments
+ steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" ${lores_train_data_dir} \
+ data/lang $gmm_dir $lat_dir
+ rm $lat_dir/fsts.*.gz # save space
+fi
+
+if [ $stage -le 12 ]; then
+ # Build a tree using our new topology. We know we have alignments for the
+ # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
+ # those. The num-leaves is always somewhat less than the num-leaves from
+ # the GMM baseline.
+ # if [ -f $tree_dir/final.mdl ]; then
+ # echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
+ # exit 1;
+ # fi
+ steps/nnet3/chain/build_tree.sh \
+ --frame-subsampling-factor 3 \
+ --context-opts "--context-width=2 --central-position=1" \
+ --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
+ $lang $ali_dir $tree_dir
+fi
+
+if [ $tdnnf_dim -eq 768 ]; then
+ if [ $stage -le 13 ]; then
+ # from mini_librispeech tdnn_1h
+ mkdir -p $dir
+ echo "$0: creating neural net configs using the xconfig parser";
+
+ num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
+ learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
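+    # Python 2 print statement; with xent_regularize=0.1 this yields 5.0, the
+    # learning-rate factor applied to the xent output layer below.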
+
+ tdnn_opts="l2-regularize=0.03 dropout-proportion=0.0 dropout-per-dim-continuous=true"
+ tdnnf_opts="l2-regularize=0.03 dropout-proportion=0.0 bypass-scale=0.66"
+ linear_opts="l2-regularize=0.03 orthonormal-constraint=-1.0"
+ prefinal_opts="l2-regularize=0.03"
+ output_opts="l2-regularize=0.015"
+
+ write_params num_targets learning_rate_factor tdnn_opts tdnnf_opts linear_opts prefinal_opts output_opts
+
+ mkdir -p $dir/configs
+  cat <<EOF > $dir/configs/network.xconfig
+ input dim=100 name=ivector
+ input dim=40 name=input
+
+ delta-layer name=delta
+ no-op-component name=input2 input=Append(delta, Scale(1.0, ReplaceIndex(ivector, t, 0)))
+
+ # the first splicing is moved before the lda layer, so no splicing here
+ relu-batchnorm-dropout-layer name=tdnn1 $tdnn_opts dim=768
+ tdnnf-layer name=tdnnf2 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=1
+ tdnnf-layer name=tdnnf3 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=1
+ tdnnf-layer name=tdnnf4 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=1
+ tdnnf-layer name=tdnnf5 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=0
+ tdnnf-layer name=tdnnf6 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf7 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf8 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf9 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf10 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf11 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf12 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf13 $tdnnf_opts dim=768 bottleneck-dim=96 time-stride=3
+ linear-component name=prefinal-l dim=192 $linear_opts
+
+ ## adding the layers for chain branch
+ prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts small-dim=192 big-dim=768
+ output-layer name=output include-log-softmax=false dim=$num_targets $output_opts
+
+ # adding the layers for xent branch
+ prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts small-dim=192 big-dim=768
+ output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
+EOF
+ steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
+ fi
+ initial_lrate=${initial_lrate:-0.002}
+ final_lrate=${final_lrate:-0.0002}
+
+elif [ $tdnnf_dim -eq 1024 ]; then
+ if [ $stage -le 13 ]; then
+ # from wsj tdnn_1g
+ mkdir -p $dir
+ echo "$0: creating neural net configs using the xconfig parser";
+
+ num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
+ learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
+
+ tdnn_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim-continuous=true"
+ tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66"
+ linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0"
+ prefinal_opts="l2-regularize=0.01"
+ output_opts="l2-regularize=0.005"
+
+ write_params num_targets learning_rate_factor tdnn_opts tdnnf_opts linear_opts prefinal_opts output_opts
+
+ mkdir -p $dir/configs
+  cat <<EOF > $dir/configs/network.xconfig
+ input dim=100 name=ivector
+ input dim=40 name=input
+
+ delta-layer name=delta
+ no-op-component name=input2 input=Append(delta, Scale(1.0, ReplaceIndex(ivector, t, 0)))
+ # no-op-component name=input2 input=delta
+
+ relu-batchnorm-dropout-layer name=tdnn1 $tdnn_opts dim=1024 input=input2
+ tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=1
+ tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=1
+ tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=1
+ tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=0
+ tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1024 bottleneck-dim=96 time-stride=3
+ linear-component name=prefinal-l dim=192 $linear_opts
+
+ ## adding the layers for chain branch
+ prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts small-dim=192 big-dim=1024
+ output-layer name=output include-log-softmax=false dim=$num_targets $output_opts
+
+ # adding the layers for xent branch
+ prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts small-dim=192 big-dim=1024
+ output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
+EOF
+ steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
+ fi
+ initial_lrate=${initial_lrate:-0.001}
+ final_lrate=${final_lrate:-0.0001}
+ # initial_lrate=0.001
+ # final_lrate=0.00005
+
+elif [ $tdnnf_dim -eq 1536 ]; then
+ if [ $stage -le 13 ]; then
+ # from chime5 tdnn_1b
+ mkdir -p $dir
+ echo "$0: creating neural net configs using the xconfig parser";
+
+ num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
+ learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
+
+ tdnn_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true"
+ tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66"
+ linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0"
+ prefinal_opts="l2-regularize=0.01"
+ output_opts="l2-regularize=0.002"
+
+ write_params num_targets learning_rate_factor tdnn_opts tdnnf_opts linear_opts prefinal_opts output_opts
+
+ mkdir -p $dir/configs
+  cat <<EOF > $dir/configs/network.xconfig
+ input dim=100 name=ivector
+ input dim=40 name=input
+
+ delta-layer name=delta
+ no-op-component name=input2 input=Append(delta, Scale(1.0, ReplaceIndex(ivector, t, 0)))
+
+ relu-batchnorm-dropout-layer name=tdnn1 $tdnn_opts dim=1536 input=input2
+ tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
+ tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
+ tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
+ tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0
+ tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
+ linear-component name=prefinal-l dim=256 $linear_opts
+
+ prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256
+ output-layer name=output include-log-softmax=false dim=$num_targets $output_opts
+
+ prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256
+ output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
+EOF
+ steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
+ fi
+ initial_lrate=${initial_lrate:-0.001}
+ final_lrate=${final_lrate:-0.0001}
+
+else
+ echo "$0: ERROR: invalid tdnnf_dim: ${tdnnf_dim}"; exit 1
+fi
+
+
+if [ $stage -le 14 ]; then
+ if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
+ utils/create_split_dir.pl \
+ /export/b0{3,4,5,6}/$USER/kaldi-data/egs/mini_librispeech-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
+ fi
+  [ $num_gpu_jobs -gt 1 ] && sudo nvidia-smi -c 3  # EXCLUSIVE_PROCESS compute mode, recommended for multi-GPU Kaldi training
+ write_params chunk_width dropout_schedule xent_regularize initial_lrate final_lrate num_epochs num_gpu_jobs stage train_stage num_utts_subset
+ # --num-valid-egs-combine --num-train-egs-combine --num-egs-diagnostic ??? see steps/nnet3/chain/get_egs.sh
+
+ steps/nnet3/chain/train.py --stage=$train_stage \
+ --cmd="$decode_cmd" \
+ --feat.online-ivector-dir=$train_ivector_dir \
+ --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
+ --chain.xent-regularize $xent_regularize \
+ --chain.leaky-hmm-coefficient=0.1 \
+ --chain.l2-regularize=0.0 \
+ --chain.apply-deriv-weights=false \
+ --chain.lm-opts="--num-extra-lm-states=2000" \
+ --trainer.dropout-schedule $dropout_schedule \
+ --trainer.add-option="--optimization.memory-compression-level=2" \
+ --trainer.srand=$srand \
+ --trainer.max-param-change=2.0 \
+ --trainer.num-epochs=$num_epochs \
+ --trainer.frames-per-iter=3000000 \
+ --trainer.optimization.num-jobs-initial=$num_gpu_jobs \
+ --trainer.optimization.num-jobs-final=$num_gpu_jobs \
+ --trainer.optimization.initial-effective-lrate=$initial_lrate \
+ --trainer.optimization.final-effective-lrate=$final_lrate \
+ --trainer.num-chunk-per-minibatch=128,64 \
+ --egs.chunk-width=$chunk_width \
+ --egs.dir="$common_egs_dir" \
+ --egs.opts="--frames-overlap-per-eg 0 --num-utts-subset $num_utts_subset" \
+ --cleanup.remove-egs=$remove_egs \
+ --cleanup.preserve-model-interval=1000 \
+ --use-gpu=true \
+ --reporting.email="$reporting_email" \
+ --feat-dir=$train_data_dir \
+ --tree-dir=$tree_dir \
+ --lat-dir=$lat_dir \
+ --dir=$dir || exit 1;
+fi
+
+steps/online/nnet3/prepare_online_decoding.sh \
+ --mfcc-config conf/mfcc_hires.conf \
+ $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online
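+# ${dir}_online now bundles the final model, i-vector extractor, and feature
+# configs, ready for Kaldi's online2 decoding binaries.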
+
+exit 0;  # The graph-building and decoding stages below are not run by default.
+
+if [ $stage -le 15 ]; then
+ # Note: it's not important to give mkgraph.sh the lang directory with the
+ # matched topology (since it gets the topology file from the model).
+ utils/mkgraph.sh \
+ --self-loop-scale 1.0 data/lang${decode_name:+_$decode_name} \
+ $tree_dir $tree_dir/graph_${decode_name} || exit 1;
+fi
+
+if [ $stage -le 16 ]; then
+ frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
+ rm $dir/.error 2>/dev/null || true
+
+  for data in $test_sets; do
+    (
+    nspk=$(wc -l <data/${data}_hires/spk2utt)
+    steps/nnet3/decode.sh \
+      --acwt 1.0 --post-decode-acwt 10.0 \
+      --frames-per-chunk $frames_per_chunk \
+      --nj $nspk --cmd "$decode_cmd" \
+      --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
+      $tree_dir/graph_${decode_name} data/${data}_hires ${dir}/decode_${decode_name}_${data} || exit 1
+    ) || touch $dir/.error &
+  done
+  wait
+  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
+fi
+
+if [ $stage -le 0 ] && [ $endstage -ge 0 ]; then
+  utils/utt2spk_to_spk2utt.pl data/$dataset/utt2spk > data/$dataset/spk2utt
+ featdir=mfcc
+ steps/make_mfcc.sh --nj $nj --cmd "$train_cmd" data/$dataset exp/make_mfcc/$dataset $featdir
+ # steps/make_mfcc.sh --nj $nj --cmd "$train_cmd" --allow-downsample true data/$dataset exp/make_mfcc/$dataset $featdir
+ steps/compute_cmvn_stats.sh data/$dataset exp/make_mfcc/$dataset $featdir
+ utils/validate_data_dir.sh data/$dataset
+
+ mkdir -p data/local/dict
+ cp $dict_dir/* data/local/dict/
+ utils/prepare_lang.sh data/local/dict "" data/local/lang data/lang
+ utils/validate_lang.pl data/lang
+
+ # if [ -z "$arpa_file" ]; then
+ # mkdir data/local/tmp
+ # ngram-count -order 3 -write-vocab data/local/tmp/vocab-full.txt -wbdiscount -text data/local/dict/corpus.txt -lm data/local/tmp/lm.arpa
+ # arpa2fst --disambig-symbol=#0 --read-symbol-table=data/lang/words.txt data/local/tmp/lm.arpa data/lang/G.fst
+ # # ../kenlm/lmplz --text dict/corpus.txt --arpa data/local/tmp/lm.arpa -S 50% -o 3
+ # else
+ # zcat -f "$arpa_file" | arpa2fst --disambig-symbol=#0 --read-symbol-table=data/lang/words.txt - data/lang/G.fst
+ # fi
+fi
+
+if [ $stage -le 1 ] && [ $endstage -ge 1 ]; then
+ # monophone
+ steps/train_mono.sh --nj $nj --cmd "$train_cmd" data/train data/lang exp/mono
+ # utils/mkgraph.sh data/lang exp/mono exp/mono/graph
+fi
+
+if [ $stage -le 2 ] && [ $endstage -ge 2 ]; then
+ # tri1 [first triphone pass]
+ steps/align_si.sh --nj $nj --cmd "$train_cmd" data/train data/lang exp/mono exp/mono_ali
+ steps/train_deltas.sh --cmd "$train_cmd" 1800 9000 data/train data/lang exp/mono_ali exp/tri1
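+  # 1800 = max number of tree leaves, 9000 = target total Gaussians.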
+ # utils/mkgraph.sh data/lang exp/tri1 exp/tri1/graph
+fi
+
+if [ $stage -le 3 ] && [ $endstage -ge 3 ]; then
+ # tri2b [LDA+MLLT] aka "tri3"
+ steps/align_si.sh --nj $nj --cmd "$train_cmd" --use-graphs true data/train data/lang exp/tri1 exp/tri1_ali
+ steps/train_lda_mllt.sh --cmd "$train_cmd" --splice-opts "--left-context=3 --right-context=3" 1800 9000 data/train data/lang exp/tri1_ali exp/tri2b
+ # utils/mkgraph.sh data/lang exp/tri2b exp/tri2b/graph
+fi
+
+if [ $stage -le 4 ] && [ $endstage -ge 4 ]; then
+ # tri3b [LDA+MLLT+SAT] aka "tri4"?
+ steps/align_si.sh --nj $nj --cmd "$train_cmd" --use-graphs true data/train data/lang exp/tri2b exp/tri2b_ali
+ #????? steps/align_fmllr.sh --nj 8 --cmd "$train_cmd" --use-graphs true data/train data/lang exp/tri2b exp/tri2b_ali
+ steps/train_sat.sh 1800 9000 data/train data/lang exp/tri2b_ali exp/tri3b
+ # utils/mkgraph.sh data/lang exp/tri3b exp/tri3b/graph
+ # utils/mkgraph.sh data/lang_ug exp/tri3b exp/tri3b/graph_ug
+ # steps/decode_fmllr.sh --config conf/decode.config --nj 1 --num-threads 8 --cmd "$decode_cmd" exp/tri3b/graph_ug data/test exp/tri3b/decode_ug
+ # steps/cleanup/find_bad_utts.sh --nj 1 --cmd "$train_cmd" data/train data/lang exp/tri3b_ali exp/tri3b_cleanup
+ # head exp/tri3b_cleanup/all_info.sorted.txt
+fi
+
+# if [ $stage -le 5 ] && [ $endstage -ge 5 ]; then
+# # tri3b_mmi [LDA+MLLT+SAT+MMI] aka "tri4_mmi"
+# steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" --use-graphs true data/train data/lang exp/tri3b exp/tri3b_ali
+# steps/make_denlats.sh --config conf/decode.config --nj $nj --cmd "$train_cmd" --transform-dir exp/tri3b_ali data/train data/lang exp/tri3b exp/tri3b_denlats
+# steps/train_mmi.sh data/train data/lang exp/tri3b_ali exp/tri3b_denlats exp/tri3b_mmi
+# fi
+
+# if [ $stage -le 8 ] && [ $endstage -ge 8 ]; then
+# local/kaldi/run_personal_chain_tdnn_1h.sh --stage 0 --num-epochs 20
+# fi
+
+exit
+
+# for x in exp/*/decode*; do [ -d $x ] && [[ $x =~ "$1" ]] && grep WER $x/wer_* | utils/best_wer.sh; done
+# for x in exp/chain/*/decode*; do [ -d $x ] && [[ $x =~ "$1" ]] && grep WER $x/wer_* | utils/best_wer.sh; done
diff --git a/run_personal_nnet3_ivector_common.sh b/run_personal_nnet3_ivector_common.sh
new file mode 100644
index 0000000..194b5c0
--- /dev/null
+++ b/run_personal_nnet3_ivector_common.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# Adapted from egs/mini_librispeech/s5/local/nnet3/run_ivector_common.sh
+
+set -euo pipefail
+
+# This script is called from local/nnet3/run_tdnn.sh and
+# local/chain/run_tdnn.sh (and may eventually be called by more
+# scripts). It contains the common feature preparation and
+# iVector-related parts of the script. See those scripts for examples
+# of usage.
+
+stage=0
+nj=8
+train_set=train
+test_sets=
+gmm=tri3b
+
+nnet3_affix=
+respect_speaker_info=true  # Set to false when utt2spk maps each utterance to its own speaker.
+
+. ./cmd.sh
+. ./path.sh
+. utils/parse_options.sh
+
+gmm_dir=exp/${gmm}
+ali_dir=exp/${gmm}_ali_${train_set}_sp
+# ali_dir=exp/${gmm}_ali_sp
+
+for f in data/${train_set}/feats.scp ${gmm_dir}/final.mdl; do
+ if [ ! -f $f ]; then
+ echo "$0: expected file $f to exist"
+ exit 1
+ fi
+done
+
+if [ $stage -le 1 ]; then
+  # Although the nnet will be trained on high-resolution data, we still have to
+  # perturb the normal data to get the alignments; _sp stands for speed-perturbed.
+ echo "$0: preparing directory for low-resolution speed-perturbed data (for alignment)"
+ utils/data/perturb_data_dir_speed_3way.sh data/${train_set} data/${train_set}_sp
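+  # This makes 0.9x and 1.1x speed copies alongside the original (1.0x),
+  # tripling the data; perturbed utterance IDs get sp0.9-/sp1.1- prefixes.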
+ echo "$0: making MFCC features for low-resolution speed-perturbed data"
+ steps/make_mfcc.sh --cmd "$train_cmd" --nj $nj data/${train_set}_sp || exit 1;
+ steps/compute_cmvn_stats.sh data/${train_set}_sp || exit 1;
+ utils/fix_data_dir.sh data/${train_set}_sp
+fi
+
+if [ $stage -le 2 ]; then
+ echo "$0: aligning with the perturbed low-resolution data"
+ steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \
+ data/${train_set}_sp data/lang $gmm_dir $ali_dir || exit 1
+fi
+
+if [ $stage -le 3 ]; then
+ # Create high-resolution MFCC features (with 40 cepstra instead of 13).
+  # This shows how you can split the storage across multiple file systems.
+ echo "$0: creating high-resolution MFCC features"
+ mfccdir=data/${train_set}_sp_hires/data
+ if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then
+ utils/create_split_dir.pl /export/fs0{1,2}/$USER/kaldi-data/mfcc/mini_librispeech-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage
+ fi
+
+ for datadir in ${train_set}_sp ${test_sets}; do
+ utils/copy_data_dir.sh data/$datadir data/${datadir}_hires
+ done
+
+ # do volume-perturbation on the training data prior to extracting hires
+ # features; this helps make trained nnets more invariant to test data volume.
+ utils/data/perturb_data_dir_volume.sh data/${train_set}_sp_hires || exit 1;
+
+ for datadir in ${train_set}_sp ${test_sets}; do
+ steps/make_mfcc.sh --nj $nj --mfcc-config conf/mfcc_hires.conf \
+ --cmd "$train_cmd" data/${datadir}_hires || exit 1;
+ steps/compute_cmvn_stats.sh data/${datadir}_hires || exit 1;
+ utils/fix_data_dir.sh data/${datadir}_hires || exit 1;
+ done
+fi
+
+if [ $stage -le 4 ]; then
+ echo "$0: computing a subset of data to train the diagonal UBM."
+ # We'll use about a quarter of the data.
+ mkdir -p exp/nnet3${nnet3_affix}/diag_ubm
+ temp_data_root=exp/nnet3${nnet3_affix}/diag_ubm
+
+  num_utts_total=$(wc -l <data/${train_set}_sp_hires/utt2spk)