This program forward-propagates features through an xvector neural network model and writes out the resulting output vectors. The output vector, or embedding, of this particular speaker-recognition network architecture is called an "xvector". The architecture consists of several frame-level feedforward layers, an aggregation (statistics pooling) layer on top of the frame-level layers, and additional segment-level layers; the xvector is normally extracted from the output layer that follows statistics pooling. By default, one xvector is produced per utterance. If desired, the features can be split into chunks, one xvector extracted per chunk, and the results averaged to produce a single vector.

 
 

Usage: nnet3-xvector-compute [options] <raw-nnet-in> <features-rspecifier> <vector-wspecifier>

e.g.: nnet3-xvector-compute final.raw scp:feats.scp ark:nnet_prediction.ark
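For illustration only (the option values and archive names here are placeholders, not taken from the post): to extract one xvector per 150-frame chunk (roughly 1.5 s at a typical 10 ms frame shift) and average them per utterance, the options registered in the code below could be used like this:

e.g.: nnet3-xvector-compute --use-gpu=no --chunk-size=150 --min-chunk-size=25 --pad-input=true final.raw scp:feats.scp ark:xvector.ark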

 
 

For one chunk of speech features, produce one xvector:

static void RunNnetComputation(const MatrixBase<BaseFloat> &features,
    const Nnet &nnet, CachingOptimizingCompiler *compiler,
    Vector<BaseFloat> *xvector) {
  ComputationRequest request;
  request.need_model_derivative = false;
  request.store_component_stats = false;
  request.inputs.push_back(
      IoSpecification("input", 0, features.NumRows()));
  IoSpecification output_spec;
  output_spec.name = "output";
  output_spec.has_deriv = false;

Limit the number of output Cindexes requested from the output-node to 1, so that one chunk (segment) produces exactly one output, i.e. one xvector. (The statistics pooling layer aggregates over all frames of the chunk, so a single segment-level output index is all that is needed.)

  output_spec.indexes.resize(1);

  request.outputs.resize(1);
  request.outputs[0].Swap(&output_spec);
  std::shared_ptr<const NnetComputation> computation(std::move(compiler->Compile(request)));
  Nnet *nnet_to_update = NULL;  // we're not doing any update.
  NnetComputer computer(NnetComputeOptions(), *computation,
                        nnet, nnet_to_update);
  CuMatrix<BaseFloat> input_feats_cu(features);
  computer.AcceptInput("input", &input_feats_cu);
  computer.Run();
  CuMatrix<BaseFloat> cu_output;
  // The returned cu_output is a matrix with a single row.
  computer.GetOutputDestructive("output", &cu_output);
  xvector->Resize(cu_output.NumCols());
  // Take the first (and only) row of the output matrix as the xvector.
  xvector->CopyFromVec(cu_output.Row(0));
}
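Before stepping through main() below, here is a condensed sketch of how this function is driven (taken from the code that follows; the model file name is a placeholder): the raw nnet is loaded and put into test mode, a compiler is constructed once, and RunNnetComputation() is then called once per feature chunk.

Nnet nnet;
ReadKaldiObject("final.raw", &nnet);          // the raw xvector extractor nnet
SetBatchnormTestMode(true, &nnet);            // batchnorm/dropout in test mode
SetDropoutTestMode(true, &nnet);
CollapseModel(CollapseModelConfig(), &nnet);

NnetSimpleComputationOptions opts;
CachingOptimizingCompilerOptions compiler_config;
CachingOptimizingCompiler compiler(nnet, opts.optimize_config, compiler_config);

Matrix<BaseFloat> chunk_feats;                // one chunk of features: frames x feat_dim
Vector<BaseFloat> xvector;
RunNnetComputation(chunk_feats, nnet, &compiler, &xvector);  // one xvector per chunk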

 
 

ParseOptions po(usage);
Timer timer;

NnetSimpleComputationOptions opts;
CachingOptimizingCompilerOptions compiler_config;

opts.acoustic_scale = 1.0;  // by default do no scaling in this recipe.

std::string use_gpu = "no";
int32 chunk_size = -1,
    min_chunk_size = 100;
// If a chunk has fewer frames than min-chunk-size, pad the input on the left and right.
bool pad_input = true;

opts.Register(&po);
compiler_config.Register(&po);

po.Register("use-gpu", &use_gpu,
    "yes|no|optional|wait, only has effect if compiled with CUDA");
po.Register("chunk-size", &chunk_size,
    "If set, extracts xvectors from specified chunk-size, and averages.  "
    "If not set, extracts an xvector from all available features.");
po.Register("min-chunk-size", &min_chunk_size,
    "Minimum chunk-size allowed when extracting xvectors.");
po.Register("pad-input", &pad_input, "If true, duplicate the first and "
    "last frames of the input features as required to equal min-chunk-size.");

po.Read(argc, argv);

if (po.NumArgs() != 3) {
  po.PrintUsage();
  exit(1);
}

#if HAVE_CUDA==1
CuDevice::Instantiate().SelectGpuId(use_gpu);
#endif

std::string nnet_rxfilename = po.GetArg(1),
    feature_rspecifier = po.GetArg(2),
    vector_wspecifier = po.GetArg(3);

Nnet nnet;
ReadKaldiObject(nnet_rxfilename, &nnet);
SetBatchnormTestMode(true, &nnet);
SetDropoutTestMode(true, &nnet);
CollapseModel(CollapseModelConfig(), &nnet);

CachingOptimizingCompiler compiler(nnet, opts.optimize_config, compiler_config);

BaseFloatVectorWriter vector_writer(vector_wspecifier);

int32 num_success = 0, num_fail = 0;
int64 frame_count = 0;
int32 xvector_dim = nnet.OutputDim("output");

SequentialBaseFloatMatrixReader feature_reader(feature_rspecifier);
for (; !feature_reader.Done(); feature_reader.Next()) {
  std::string utt = feature_reader.Key();
  const Matrix<BaseFloat> &features (feature_reader.Value());
  if (features.NumRows() == 0) {
    KALDI_WARN << "Zero-length utterance: " << utt;
    num_fail++;
    continue;
  }
  int32 num_rows = features.NumRows(),
      feat_dim = features.NumCols(),
      this_chunk_size = chunk_size;
  if (!pad_input && num_rows < min_chunk_size) {
    KALDI_WARN << "Minimum chunk size of " << min_chunk_size
               << " is greater than the number of rows "
               << "in utterance: " << utt;
    num_fail++;
    continue;
  } else if (num_rows < chunk_size) {
    KALDI_LOG << "Chunk size of " << chunk_size << " is greater than "
              << "the number of rows in utterance: " << utt
              << ", using chunk size of " << num_rows;
    this_chunk_size = num_rows;
  } else if (chunk_size == -1) {
    this_chunk_size = num_rows;
  }
  // With the default chunk_size == -1, this_chunk_size == num_rows, so num_chunks == 1;
  // e.g. with 500 frames and --chunk-size=200, num_chunks = ceil(500 / 200.0) = 3.
  int32 num_chunks = ceil(
      num_rows / static_cast<BaseFloat>(this_chunk_size));
  Vector<BaseFloat> xvector_avg(xvector_dim, kSetZero);
  BaseFloat tot_weight = 0.0;

  // Iterate over the feature chunks.
  for (int32 chunk_indx = 0; chunk_indx < num_chunks; chunk_indx++) {
    // Near the end of the input, the remaining frames may not be enough to fill a whole chunk.
    int32 offset = std::min(
        this_chunk_size, num_rows - chunk_indx * this_chunk_size);
    if (!pad_input && offset < min_chunk_size)
      continue;
    SubMatrix<BaseFloat> sub_features(
        features, chunk_indx * this_chunk_size, offset, 0, feat_dim);
    Vector<BaseFloat> xvector;
    tot_weight += offset;

    // Pad input if the offset is less than the minimum chunk size.
    if (pad_input && offset < min_chunk_size) {
      Matrix<BaseFloat> padded_features(min_chunk_size, feat_dim);
      int32 left_context = (min_chunk_size - offset) / 2;
      int32 right_context = min_chunk_size - offset - left_context;
      for (int32 i = 0; i < left_context; i++) {
        padded_features.Row(i).CopyFromVec(sub_features.Row(0));
      }
      for (int32 i = 0; i < right_context; i++) {
        padded_features.Row(min_chunk_size - i - 1).CopyFromVec(sub_features.Row(offset - 1));
      }
      padded_features.Range(left_context, offset, 0, feat_dim).CopyFromMat(sub_features);
      // One chunk produces one xvector.
      RunNnetComputation(padded_features, nnet, &compiler, &xvector);
    } else {
      RunNnetComputation(sub_features, nnet, &compiler, &xvector);
    }
    // Accumulate the xvectors of all chunks, each weighted by its number of frames (offset).
    xvector_avg.AddVec(offset, xvector);
  }
  // Divide by the total weight to get the average xvector over all chunks.
  xvector_avg.Scale(1.0 / tot_weight);
  vector_writer.Write(utt, xvector_avg);

  frame_count += features.NumRows();
  num_success++;
}
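To make the chunking, padding and averaging concrete, here is a made-up example (the numbers are placeholders): an utterance with 250 frames, run with --chunk-size=100, --min-chunk-size=100 and the default --pad-input=true. Then num_chunks = ceil(250 / 100.0) = 3, and the chunks hold 100, 100 and 50 frames. The last chunk is shorter than min-chunk-size, so it is padded to 100 frames: left_context = (100 - 50) / 2 = 25 copies of its first frame plus right_context = 25 copies of its last frame. Each chunk yields one xvector, and the vector written for the utterance is the frame-weighted average (100*x1 + 100*x2 + 50*x3) / 250; note that the padded chunk keeps its original weight of 50 frames, because tot_weight is accumulated before the padding is applied.

If the resulting archive needs to be read back from another C++ program, a minimal sketch (the archive name is a placeholder; the reader typedef comes from util/table-types.h) could look like this:

// Iterate over the per-utterance xvectors written by nnet3-xvector-compute.
SequentialBaseFloatVectorReader xvector_reader("ark:xvector.ark");
for (; !xvector_reader.Done(); xvector_reader.Next()) {
  std::string utt = xvector_reader.Key();
  const Vector<BaseFloat> &xvec = xvector_reader.Value();
  KALDI_LOG << "Utterance " << utt << ": xvector dim = " << xvec.Dim();
}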

  

 
