kaldi脚本注释二
steps/decode.sh
#!/bin/bash

# Copyright 2012  Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0

# Decode test data with a GMM model, producing lattices.
# Works out the feature type (delta or LDA+MLLT) from the model directory.

# Begin configuration section.
transform_dir=    # this option won't normally be used, but it can be used if you want to
                  # supply existing fMLLR transforms when decoding.
iter=
model=            # You can specify the model to use (e.g. if you want to use the .alimdl)
stage=0
nj=4
cmd=run.pl
max_active=7000
beam=13.0
lattice_beam=6.0
acwt=0.083333     # note: only really affects pruning (scoring is on lattices).
num_threads=1     # if >1, will use gmm-latgen-faster-parallel
parallel_opts=    # ignored now.
scoring_opts=
# note: there are no more min-lmwt and max-lmwt options, instead use
# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20"
skip_scoring=false
decode_extra_opts=
# End configuration section.

echo "$0 $@"  # Print the command line for logging

[ -f ./path.sh ] && . ./path.sh; # source the path.
. parse_options.sh || exit 1;

if [ $# != 3 ]; then
  echo "Usage: steps/decode.sh [options] <graph-dir> <data-dir> <decode-dir>"
  echo "... where <decode-dir> is assumed to be a sub-directory of the directory"
  echo " where the model is."
  echo "e.g.: steps/decode.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr"
  echo ""
  echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out"
  echo "what type of features you used (assuming it's one of these two)"
  echo ""
  echo "main options (for others, see top of script file)"
  echo " --config <config-file> # config containing options"
  echo " --nj <nj> # number of parallel jobs"
  echo " --iter <iter> # Iteration of model to test."
  echo " --model <model> # which model to use (e.g. to"
  echo " # specify the final.alimdl)"
  echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
  echo " --transform-dir <trans-dir> # dir to find fMLLR transforms "
  echo " --acwt <float> # acoustic scale used for lattice generation "
  echo " --scoring-opts <string> # options to local/score.sh"
  echo " --num-threads <n> # number of threads to use, default 1."
  echo " --parallel-opts <opts> # ignored now, present for historical reasons."
  exit 1;
fi

graphdir=$1   # decoding-graph directory (contains HCLG.fst, words.txt)
data=$2       # test data directory
dir=$3        # output directory for the decoding results
srcdir=`dirname $dir`; # The model directory is one level up from decoding directory.
sdata=$data/split$nj;  # per-job split of the test data

mkdir -p $dir/log   # create <decode-dir>/log
# Re-split the data unless the split dir exists and is newer than feats.scp.
[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
echo $nj > $dir/num_jobs   # record the number of parallel jobs in <decode-dir>/num_jobs

if [ -z "$model" ]; then # if --model <mdl> was not specified on the command line...
  if [ -z $iter ]; then model=$srcdir/final.mdl;  # ... and no --iter either: use final.mdl
  else model=$srcdir/$iter.mdl; fi                # ... --iter given: use that iteration's model
fi

if [ $(basename $model) != final.alimdl ] ; then
  # Do not use the $srcpath -- look at the path where the model is.
  # If an alignment model (final.alimdl) exists next to the chosen model but no
  # fMLLR transforms were supplied, we are decoding a SAT model speaker-independently:
  # warn, but proceed.
  if [ -f $(dirname $model)/final.alimdl ] && [ -z "$transform_dir" ]; then
    echo -e '\n\n'
    echo $0 'WARNING: Running speaker independent system decoding using a SAT model!'
    echo $0 'WARNING: This is OK if you know what you are doing...'
    echo -e '\n\n'
  fi
fi

# Check that all required inputs exist; abort with a message otherwise.
for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst; do
  [ ! -f $f ] && echo "decode.sh: no such file $f" && exit 1;
done

# Determine the feature type from whether an LDA matrix exists in the model dir.
if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
echo "decode.sh: feature type is $feat_type";

splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
delta_opts=`cat $srcdir/delta_opts 2>/dev/null`

thread_string=
[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads"

# Build the feature-extraction pipeline according to the feature type.
case $feat_type in
  delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
  lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";;
  *) echo "Invalid feature type $feat_type" && exit 1;
esac

if [ ! -z "$transform_dir" ]; then # add transforms to features, if --transform-dir was given.
  echo "Using fMLLR transforms from $transform_dir"
  [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist."
  [ ! -s $transform_dir/num_jobs ] && \
    echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1;
  nj_orig=$(cat $transform_dir/num_jobs)
  if [ $nj -ne $nj_orig ]; then
    # Our num-jobs differs from the transform dir's: copy the transforms into a
    # single indexed archive so each of our jobs can look transforms up by utterance.
    echo "$0: num-jobs for transforms mismatches, so copying them."
    for n in $(seq $nj_orig); do cat $transform_dir/trans.$n; done | \
      copy-feats ark:- ark,scp:$dir/trans.ark,$dir/trans.scp || exit 1;
    feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/trans.scp ark:- ark:- |"
  else
    # number of jobs matches with alignment dir.
    feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |"
  fi
fi

if [ $stage -le 0 ]; then
  # Sanity check: the graph's pdf count must match the model's.
  if [ -f "$graphdir/num_pdfs" ]; then
    [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \
      { echo "Mismatch in number of pdfs with $model"; exit 1; }
  fi
  # Run lattice-generating decoding, one job per data split; lattices are gzipped.
  $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \
    gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \
    --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt $decode_extra_opts \
    $model $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
fi

if [ $stage -le 1 ]; then
  # Produce lattice diagnostics.
  [ ! -z $iter ] && iter_opt="--iter $iter"
  steps/diagnostic/analyze_lats.sh --cmd "$cmd" $iter_opt $graphdir $dir
fi

if ! $skip_scoring ; then
  # Score the decoding output unless --skip-scoring true was given.
  [ ! -x local/score.sh ] && \
    echo "Not scoring because local/score.sh does not exist or not executable." && exit 1;
  local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir ||
    { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; }
fi

exit 0;
kaldi脚本注释二的更多相关文章
- kaldi脚本注释一
utils/split_data.sh ##再$data文件夹下,创建split{num_split}文件夹,再split×里面创建所有的数字文件夹#后面基本上是把$data文件夹下的各个文件都进行s ...
- Linux shell脚本编程(二)
Linux shell脚本编程(二) 练习:求100以内所有偶数之和; 使用至少三种方法实现; 示例1: #!/bin/bash # declare -i sum=0 #声明一个变量求和,初始值为0 ...
- shell脚本(二)
shell脚本(二)——变量 一.定义:用来存放各种数据,编程语言组成部分 变量的命名规则: 变量名由数字 字母下划线组成 必须以字母或者下划线开头 不能使用shell里面的关键词 ...
- .NET中的规范标准注释(二) -- 创建帮助文档入门篇
一.摘要 在本系列的第一篇文章介绍了.NET中XML注释的用途, 本篇文章将讲解如何使用XML注释生成与MSDN一样的帮助文件.主要介绍NDoc的继承者:SandCastle. 二.背景 要生成帮助文 ...
- SecureCrt脚本(二)二级对象之Dialog
Crt自动化 测试 SecureCrt脚本 JS脚本 1.引言 2.Dialog属性和方法 2.1.属性 2.2.方法 2.2.1.FileOpenDialog 2.2.2.MessageBox ...
- 在C#环境中动态调用IronPython脚本(二)
一.Python数据类型与C#数据类型的对应 Python中数据类型中的简单类型,例如int,float,string可以对应到C#环境中的int32,double,string,这些对应比较直观,P ...
- JsDoc脚本注释文档生成
使用jsDoc可使用特定注释,将注释的内容生成文档,可用于生成脚本库的API文档 jsdoc 文档: http://usejsdoc.org/
- yesno孤立词识别kaldi脚本
path.sh主要设定路径等 export KALDI_ROOT=`pwd`/../../.. [ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ ...
- Python文件中执行脚本注释和编码声明
在 Python 脚本的第一行经常见到这样的注释: #!/usr/bin/env python3 或者 #!/usr/bin/python3 含义 在脚本中, 第一行以 #! 开头的代码, 在计算机行 ...
随机推荐
- spark java.lang.OutOfMemoryError: unable to create new native thread
最近迁移集群,在hadoop-2.8.4 的yarn上跑 spark 程序 报了以下错误 java.lang.OutOfMemoryError: unable to create new native ...
- python3 不知文件编码情况下打开文件代码记录
import chardet path='test.txt' bytes = min(100, os.path.getsize(path)) raw = open(path, 'rb').read(b ...
- leetcode102
本题是广度优先遍历(BFS)实现树的层次遍历,使用队列实现. class Solution { public: vector<vector<int>> levelOrder(T ...
- leetcode1032
class StreamChecker: def __init__(self, words: 'List[str]'): self.maxLen = 0 self.List = set(words) ...
- Spring再接触 IOC DI
直接上例子 引入spring以及Junite所需要的jar包 User.java package com.bjsxt.model; public class User { private String ...
- tfs填坑那些事
1.csdn下载 安装(步1:装 步2:激活 步3:配置数据库之类 选择完全配置)激活(win7能出来,win10不出来) 注意 tfs服务开启,,代理服务开启 2.选择敏捷模板 3.无法新建项目, ...
- Kafka自带zookeeper报错INFO Got user-level KeeperException when processing xxx Error Path:/brokers Error:KeeperErrorCode = NodeExists for /brokers (org.apache.zookeeper.server.PrepRequestProcessor)
问题描述: 按照kafka官方文档的操作步骤,解压kafka压缩包后.依次启动zookeeper,和kafka服务 kafka服务启动后,查看到zookeeper日志里有以下异常 问题原因及解决办法: ...
- spring okhttp3
准备工作 在pom.xml文件中增加以下依赖 <dependency> <groupId>com.squareup.okhttp3</groupId> <ar ...
- 373. Find K Pairs with Smallest Sums 找出求和和最小的k组数
[抄题]: You are given two integer arrays nums1 and nums2 sorted in ascending order and an integer k. D ...
- 【转载】我为什么弃用OpenStack转向VMware vsphere
我为什么弃用OpenStack转向VMware Vsphere,一切皆为简单.高效.因为我们在工作过程中涉及到大量的测试工作,每天都有成百个虚拟机的创建和销毁工作. 工作任务非常繁重,我们的持续集成平 ...