Today we walk through the EM derivation and solution for the Gaussian mixture model.
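As a brief recap, these are the standard EM updates for a K-component Gaussian mixture, which the functions below implement (notation follows the code):

E-step (responsibilities): r(z_nk) = pi_k * N(x_n | u_k, Cov_k) / sum_j pi_j * N(x_n | u_j, Cov_j)

M-step: N_k = sum_n r(z_nk);  pi_k = N_k / N;  u_k = (1/N_k) * sum_n r(z_nk) * x_n;  Cov_k = (1/N_k) * sum_n r(z_nk) * (x_n - u_k)(x_n - u_k)^T

Convergence is monitored through the log-likelihood sum_n log( sum_k pi_k * N(x_n | u_k, Cov_k) ), computed by CalEnergy below. The sequential (online) version updates the same statistics one sample at a time, e.g. N_k_new = N_k + r_new(z_nk) - r_old(z_nk), with matching incremental updates of u_k and Cov_k.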


The full code is given below:

import copy
import numpy

def NDimensionGaussian(X_vector, U_Mean, CovarianceMatrix):
    # Density of a D-dimensional Gaussian N(x | u, Sigma) evaluated at X_vector
    X = X_vector
    D = numpy.shape(X)[0]
    U = U_Mean
    CM = CovarianceMatrix
    Y = X - U
    # (x-u)^T * Sigma^-1 * (x-u), extracted from the 1x1 matrix as a plain scalar
    temp = (Y.transpose() * CM.I * Y)[0, 0]
    result = (1.0 / ((2 * numpy.pi) ** (D / 2.0))) * (1.0 / (numpy.linalg.det(CM) ** 0.5)) * numpy.exp(-0.5 * temp)
    return result

def CalMean(X):
    D, N = numpy.shape(X)
    MeanVector = numpy.mat(numpy.zeros((D, 1)))
    for d in range(D):
        for n in range(N):
            MeanVector[d, 0] += X[d, n]
        MeanVector[d, 0] /= float(N)
    return MeanVector

def CalCovariance(X, MV):
    D, N = numpy.shape(X)
    CoV = numpy.mat(numpy.zeros((D, D)))
    for n in range(N):
        Temp = X[:, n] - MV
        CoV += Temp * Temp.transpose()
    CoV /= float(N)
    return CoV

def CalEnergy(Xn, Pik, Uk, Cov):
    # Log-likelihood of the data under the current mixture parameters
    D, N = numpy.shape(Xn)
    D_k, K = numpy.shape(Uk)
    if D != D_k:
        print('dimension not equal, break')
        return
    energy = 0.0
    for n_iter in range(N):
        temp = 0
        for k_iter in range(K):
            temp += Pik[0, k_iter] * NDimensionGaussian(Xn[:, n_iter], Uk[:, k_iter], Cov[k_iter])
        energy += numpy.log(temp)
    return float(energy)

def SequentialEMforMixGaussian(InputData, K):
    # Initialize the mixing coefficients pi_k uniformly
    pi_Cof = numpy.mat(numpy.ones((1, K)) * (1.0 / float(K)))
    X = numpy.mat(InputData)
    X_mean = CalMean(X)
    print(X_mean)
    X_cov = CalCovariance(X, X_mean)
    print(X_cov)
    # Initialize UK; column k is the mean vector of the k-th Gaussian
    # X is D-dimensional with N sample points
    D, N = numpy.shape(X)
    print(D, N)
    UK = numpy.mat(numpy.zeros((D, K)))
    for d_iter in range(D):
        for k_iter in range(K):
            UK[d_iter, k_iter] = X_mean[d_iter, 0] + (-1) ** k_iter + (-1) ** d_iter
    print(UK)
    # Initialize the list of K covariance matrices
    List_cov = []
    for k_iter in range(K):
        List_cov.append(numpy.mat(numpy.eye(X[:, 0].size)))
    print(List_cov)
    List_cov_new = copy.deepcopy(List_cov)
    rZnk = numpy.mat(numpy.zeros((N, K)))
    denominator = numpy.mat(numpy.zeros((N, 1)))
    rZnk_new = numpy.mat(numpy.zeros((N, K)))
    Nk = 0.5 * numpy.mat(numpy.ones((1, K)))
    print(Nk)
    Nk_new = numpy.mat(numpy.zeros((1, K)))
    UK_new = numpy.mat(numpy.zeros((D, K)))
    pi_Cof_new = numpy.mat(numpy.zeros((1, K)))
    for n_iter in range(1, N):
        # rZnk = pi_k*Gaussian(Xn|uk,Cov_k) / sum_j(pi_j*Gaussian(Xn|uj,Cov_j))
        for k_iter in range(K):
            rZnk_new[n_iter, k_iter] = pi_Cof[0, k_iter] * NDimensionGaussian(X[:, n_iter], UK[:, k_iter], List_cov[k_iter])
            denominator[n_iter, 0] += rZnk_new[n_iter, k_iter]
        for k_iter in range(K):
            rZnk_new[n_iter, k_iter] /= denominator[n_iter, 0]
            print('rZnk_new', rZnk_new[n_iter, k_iter], '\n')
        for k_iter in range(K):
            Nk_new[0, k_iter] = Nk[0, k_iter] + rZnk_new[n_iter, k_iter] - rZnk[n_iter, k_iter]
            print('Nk_new', Nk_new, '\n')
            # (n_iter+1) samples have been seen so far
            pi_Cof_new[0, k_iter] = Nk_new[0, k_iter] / float(n_iter + 1)
            print('pi_Cof_new', pi_Cof_new, '\n')
            UK_new[:, k_iter] = UK[:, k_iter] + ((rZnk_new[n_iter, k_iter] - rZnk[n_iter, k_iter]) / float(Nk_new[0, k_iter])) * (X[:, n_iter] - UK[:, k_iter])
            print('UK_new', UK_new, '\n')
            Temp = X[:, n_iter] - UK_new[:, k_iter]
            List_cov_new[k_iter] = List_cov[k_iter] + ((rZnk_new[n_iter, k_iter] - rZnk[n_iter, k_iter]) / float(Nk_new[0, k_iter])) * (Temp * Temp.transpose() - List_cov[k_iter])
            print('List_cov_new', List_cov_new, '\n')
        # Carry the updated estimates over to the next sample
        rZnk = copy.deepcopy(rZnk_new)
        Nk = copy.deepcopy(Nk_new)
        pi_Cof = copy.deepcopy(pi_Cof_new)
        UK = copy.deepcopy(UK_new)
        List_cov = copy.deepcopy(List_cov_new)
    print(pi_Cof, UK, List_cov)
    return pi_Cof, UK, List_cov

def BatchEMforMixGaussian(InputData, K, MaxIter):
    # Initialize the mixing coefficients pi_k uniformly
    pi_Cof = numpy.mat(numpy.ones((1, K)) * (1.0 / float(K)))
    X = numpy.mat(InputData)
    X_mean = CalMean(X)
    print(X_mean)
    X_cov = CalCovariance(X, X_mean)
    print(X_cov)
    # Initialize UK; column k is the mean vector of the k-th Gaussian
    # X is D-dimensional with N sample points
    D, N = numpy.shape(X)
    print(D, N)
    UK = numpy.mat(numpy.zeros((D, K)))
    for d_iter in range(D):
        for k_iter in range(K):
            UK[d_iter, k_iter] = X_mean[d_iter, 0] + (-1) ** k_iter + (-1) ** d_iter
    print(UK)
    # Initialize the list of K covariance matrices
    List_cov = []
    for k_iter in range(K):
        List_cov.append(numpy.mat(numpy.eye(X[:, 0].size)))
    print(List_cov)
    energy_new = 0
    energy_old = CalEnergy(X, pi_Cof, UK, List_cov)
    print(energy_old)
    currentIter = 0
    while True:
        currentIter += 1
        List_cov_new = []
        rZnk = numpy.mat(numpy.zeros((N, K)))
        denominator = numpy.mat(numpy.zeros((N, 1)))
        Nk = numpy.mat(numpy.zeros((1, K)))
        UK_new = numpy.mat(numpy.zeros((D, K)))
        pi_new = numpy.mat(numpy.zeros((1, K)))
        # E-step: rZnk = pi_k*Gaussian(Xn|uk,Cov_k) / sum_j(pi_j*Gaussian(Xn|uj,Cov_j))
        for n_iter in range(N):
            for k_iter in range(K):
                rZnk[n_iter, k_iter] = pi_Cof[0, k_iter] * NDimensionGaussian(X[:, n_iter], UK[:, k_iter], List_cov[k_iter])
                denominator[n_iter, 0] += rZnk[n_iter, k_iter]
            for k_iter in range(K):
                rZnk[n_iter, k_iter] /= denominator[n_iter, 0]
                # print('rZnk', rZnk[n_iter, k_iter])
        # M-step: pi_new = Nk/N
        for k_iter in range(K):
            for n_iter in range(N):
                Nk[0, k_iter] += rZnk[n_iter, k_iter]
            pi_new[0, k_iter] = Nk[0, k_iter] / float(N)
            # print('pi_k_new', pi_new[0, k_iter])
        # M-step: uk_new = (1/Nk)*sum_n(rZnk*Xn)
        for k_iter in range(K):
            for n_iter in range(N):
                UK_new[:, k_iter] += (1.0 / float(Nk[0, k_iter])) * rZnk[n_iter, k_iter] * X[:, n_iter]
            # print('UK_new', UK_new[:, k_iter])
        # M-step: cov_k_new = (1/Nk)*sum_n(rZnk*(Xn-uk_new)*(Xn-uk_new)^T)
        for k_iter in range(K):
            X_cov_new = numpy.mat(numpy.zeros((D, D)))
            for n_iter in range(N):
                Temp = X[:, n_iter] - UK_new[:, k_iter]
                X_cov_new += (1.0 / float(Nk[0, k_iter])) * rZnk[n_iter, k_iter] * Temp * Temp.transpose()
            # print('X_cov_new', X_cov_new)
            List_cov_new.append(X_cov_new)
        # Log-likelihood under the updated parameters
        energy_new = CalEnergy(X, pi_new, UK_new, List_cov_new)
        print('energy_new', energy_new)
        # print(pi_new)
        # print(UK_new)
        # print(List_cov_new)
        if energy_old >= energy_new or currentIter > MaxIter:
            UK = copy.deepcopy(UK_new)
            pi_Cof = copy.deepcopy(pi_new)
            List_cov = copy.deepcopy(List_cov_new)
            break
        else:
            UK = copy.deepcopy(UK_new)
            pi_Cof = copy.deepcopy(pi_new)
            List_cov = copy.deepcopy(List_cov_new)
            energy_old = energy_new
    return pi_Cof, UK, List_cov
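For a quick test, both solvers can be run on synthetic two-dimensional data. The sketch below is a minimal usage example; the sample data, the random seed, and the choices K=2 and MaxIter=100 are illustrative assumptions, not part of the original post:

import numpy

# Illustrative test data only: two well-separated 2-D Gaussian clusters
numpy.random.seed(0)
cluster1 = numpy.random.randn(2, 100) + numpy.array([[3.0], [3.0]])    # roughly centered at (3, 3)
cluster2 = numpy.random.randn(2, 100) + numpy.array([[-3.0], [-3.0]])  # roughly centered at (-3, -3)
Data = numpy.hstack((cluster1, cluster2))                              # shape (D, N) = (2, 200)

pi_batch, mu_batch, cov_batch = BatchEMforMixGaussian(Data, 2, 100)
pi_seq, mu_seq, cov_seq = SequentialEMforMixGaussian(Data, 2)
print('batch mixing weights:', pi_batch)
print('batch means (one column per component):', mu_batch)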

