# -*- coding:utf-8 -*-

import sys
import re
from hmmlearn import hmm
import numpy as np
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import tldextract
import os


def iterbrowse(path):
    for home, dirs, files in os.walk(path):
        for filename in files:
            yield os.path.join(home, filename)


def extract_domain(domain):
    suffix = {'.com','.la','.io', '.co', '.cn','.info', '.net', '.org','.me', '.mobi', '.us', '.biz', '.xxx', '.ca', '.co.jp', '.com.cn', '.net.cn', '.org.cn', '.mx','.tv', '.ws', '.ag', '.com.ag', '.net.ag', '.org.ag','.am','.asia', '.at', '.be', '.com.br', '.net.br', '.name', '.live', '.news', '.bz', '.tech', '.pub', '.wang', '.space', '.top', '.xin', '.social', '.date', '.site', '.red', '.studio', '.link', '.online', '.help', '.kr', '.club', '.com.bz', '.net.bz', '.cc', '.band', '.market', '.com.co', '.net.co', '.nom.co', '.lawyer', '.de', '.es', '.com.es', '.nom.es', '.org.es', '.eu', '.wiki', '.design', '.software', '.fm', '.fr', '.gs', '.in', '.co.in', '.firm.in', '.gen.in', '.ind.in', '.net.in', '.org.in', '.it', '.jobs', '.jp', '.ms', '.com.mx', '.nl','.nu','.co.nz','.net.nz', '.org.nz', '.se', '.tc', '.tk', '.tw', '.com.tw', '.idv.tw', '.org.tw', '.hk', '.co.uk', '.me.uk', '.org.uk', '.vg'}
    domain = domain.lower()
    names = domain.split(".")
    if len(names) >= 3:
        if ("." + ".".join(names[-2:])) in suffix:
            return ".".join(names[-3:]), ".".join(names[:-3])
        elif ("." + names[-1]) in suffix:
            return ".".join(names[-2:]), ".".join(names[:-2])
    print "New domain suffix found. Use tldextract instead..."
    pos = domain.rfind("/")
    if pos >= 0:  # the subdomain may contain '/', e.g. produced by a DNS tunnel tool
        ext = tldextract.extract(domain[pos+1:])
        subdomain = domain[:pos+1] + ext.subdomain
    else:
        ext = tldextract.extract(domain)
        subdomain = ext.subdomain
    if ext.suffix:
        mdomain = ext.domain + "." + ext.suffix
    else:
        mdomain = ext.domain
    return mdomain, subdomain


def parse(log):
    data = log.split('^')
    SRC_PORT_IDX = 5-1
    DST_PORT_IDX = 6-1
    PROTOCOL_IDX = 7-1
    protol = data[PROTOCOL_IDX]
    dstport = data[DST_PORT_IDX]
    if '' == protol and ('' == dstport):
        DNS_QUERY_NAME_IDX = 55-1  # domain
        if len(data) < 55:
            print "error line:"
            print log
            return ("", "")
        domain = data[DNS_QUERY_NAME_IDX]
        mdomain, subdomain = extract_domain(domain)
        return (mdomain, subdomain)
    else:
        print "error line, not a DNS record:"
        print log
        return ("", "")


# minimum length of a subdomain to process
MIN_LEN = 3
# number of hidden states
N = 5
# maximum-likelihood (log-probability) threshold
T = -50
# model file name
FILE_MODEL = "hmm-cdn.m"


def get_cdn_domains(dir_path):
    domain_list = []
    for path in iterbrowse(dir_path):
        with open(path) as f:
            for line in f:
                mdomain, sub_domain = parse(line)
                if len(sub_domain) >= MIN_LEN:
                    domain_list.append(sub_domain)
                    if len(domain_list) >= 2000:
                        return domain_list
                #else:
                #    print path, "pass line:", line
    return domain_list


def domain2ver(domain):
    ver = []
    for i in range(0, len(domain)):
        # encode each character as its ASCII code: one observation per character
        ver.append([ord(domain[i])])
    return ver


def train_hmm(domain_list):
    X = [[0]]
    X_lens = [1]
    for domain in domain_list:
        ver = domain2ver(domain)
        np_ver = np.array(ver)
        #print len(np_ver)
        try:
            X = np.concatenate([X, np_ver])
        except ValueError:
            print domain
            print len(X), len(np_ver)
            print X
            print np_ver
            raise
        X_lens.append(len(np_ver))
    remodel = hmm.GaussianHMM(n_components=N, covariance_type="full", n_iter=100)
    remodel.fit(X, X_lens)
    joblib.dump(remodel, FILE_MODEL)
    return remodel


def test(remodel, domain_list):
    x = []
    y = []
    for domain in domain_list:
        domain_ver = domain2ver(domain)
        np_ver = np.array(domain_ver)
        pro = remodel.score(np_ver)
        print "SCORE:(%d) DOMAIN:(%s) " % (pro, domain)
        x.append(len(domain))
        y.append(pro)
    return x, y


if __name__ == '__main__':
    domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_cdn")
    remodel = train_hmm(domain_list)
    remodel = joblib.load(FILE_MODEL)
    x_1, y_1 = test(remodel, domain_list)
    print x_1
    print y_1
    #sys.exit(0)
    domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_black")
    x_2, y_2 = test(remodel, domain_list)
    print x_2
    print y_2
    domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_white_like")
    x_3, y_3 = test(remodel, domain_list)
    print x_3
    print y_3
    #%matplotlib inline
    fig, ax = plt.subplots()
    ax.set_xlabel('Domain Length')
    ax.set_ylabel('HMM Score')
    ax.scatter(x_3, y_3, color='b', label="WHITE")
    ax.scatter(x_2, y_2, color='g', label="BLACK")
    ax.scatter(x_1, y_1, color='r', label="CDN")
    ax.legend(loc='right')
    plt.show()
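
The threshold T = -50 is defined above but never applied in the script. A minimal sketch of how it could be used to flag a single subdomain, assuming (as the scatter plot suggests) that scores well below those of the CDN/white training data indicate a suspicious name; is_suspicious is a hypothetical helper, not part of the original code:

def is_suspicious(remodel, subdomain, threshold=T):
    # Score the character sequence with the trained HMM; a log-likelihood
    # below the threshold means the subdomain looks unlike the training data.
    np_ver = np.array(domain2ver(subdomain))
    return remodel.score(np_ver) < threshold

# e.g. is_suspicious(remodel, "aaaabbbbcccc0001.tunnel") would likely return True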

Saving and loading the model with pickle instead of joblib:

# -*- coding:utf-8 -*-

import sys
import re
from hmmlearn import hmm
import numpy as np
#from sklearn.externals import joblib
import matplotlib.pyplot as plt
import tldextract
import os
import pickle


def iterbrowse(path):
    for home, dirs, files in os.walk(path):
        for filename in files:
            yield os.path.join(home, filename)


def extract_domain(domain):
    suffix = {'.com','.la','.io', '.co', '.cn','.info', '.net', '.org','.me', '.mobi', '.us', '.biz', '.xxx', '.ca', '.co.jp', '.com.cn', '.net.cn', '.org.cn', '.mx','.tv', '.ws', '.ag', '.com.ag', '.net.ag', '.org.ag','.am','.asia', '.at', '.be', '.com.br', '.net.br', '.name', '.live', '.news', '.bz', '.tech', '.pub', '.wang', '.space', '.top', '.xin', '.social', '.date', '.site', '.red', '.studio', '.link', '.online', '.help', '.kr', '.club', '.com.bz', '.net.bz', '.cc', '.band', '.market', '.com.co', '.net.co', '.nom.co', '.lawyer', '.de', '.es', '.com.es', '.nom.es', '.org.es', '.eu', '.wiki', '.design', '.software', '.fm', '.fr', '.gs', '.in', '.co.in', '.firm.in', '.gen.in', '.ind.in', '.net.in', '.org.in', '.it', '.jobs', '.jp', '.ms', '.com.mx', '.nl','.nu','.co.nz','.net.nz', '.org.nz', '.se', '.tc', '.tk', '.tw', '.com.tw', '.idv.tw', '.org.tw', '.hk', '.co.uk', '.me.uk', '.org.uk', '.vg'}
    domain = domain.lower()
    names = domain.split(".")
    if len(names) >= 3:
        if ("." + ".".join(names[-2:])) in suffix:
            return ".".join(names[-3:]), ".".join(names[:-3])
        elif ("." + names[-1]) in suffix:
            return ".".join(names[-2:]), ".".join(names[:-2])
    print "New domain suffix found. Use tldextract instead..."
    pos = domain.rfind("/")
    if pos >= 0:  # the subdomain may contain '/', e.g. produced by a DNS tunnel tool
        ext = tldextract.extract(domain[pos+1:])
        subdomain = domain[:pos+1] + ext.subdomain
    else:
        ext = tldextract.extract(domain)
        subdomain = ext.subdomain
    if ext.suffix:
        mdomain = ext.domain + "." + ext.suffix
    else:
        mdomain = ext.domain
    return mdomain, subdomain


def parse(log):
    data = log.split('^')
    SRC_PORT_IDX = 5-1
    DST_PORT_IDX = 6-1
    PROTOCOL_IDX = 7-1
    protol = data[PROTOCOL_IDX]
    dstport = data[DST_PORT_IDX]
    if '' == protol and ('' == dstport):
        DNS_QUERY_NAME_IDX = 55-1  # domain
        if len(data) < 55:
            print "error line:"
            print log
            return ("", "")
        domain = data[DNS_QUERY_NAME_IDX]
        mdomain, subdomain = extract_domain(domain)
        return (mdomain, subdomain)
    else:
        print "error line, not a DNS record:"
        print log
        return ("", "")


# minimum length of a subdomain to process
MIN_LEN = 1
# number of hidden states
N = 8
# maximum-likelihood (log-probability) threshold
T = -50
# model file names
FILE_MODEL = "hmm-cdn.m"
FILE_MODEL2 = "hmm-cdn-white.pkl"


def get_cdn_domains(dir_path):
    domain_list = []
    for path in iterbrowse(dir_path):
        with open(path) as f:
            for line in f:
                mdomain, sub_domain = parse(line)
                if len(sub_domain) >= MIN_LEN:
                    domain_list.append(sub_domain)
                    if len(domain_list) >= 3000:
                        return domain_list
                #else:
                #    print path, "pass line:", line
    return domain_list


def domain2ver(domain):
    ver = []
    for i in range(0, len(domain)):
        # encode each character as its ASCII code: one observation per character
        ver.append([ord(domain[i])])
    return ver


def train_hmm(domain_list):
    if os.path.exists(FILE_MODEL2):
        print "found model file, use it..."
        file_model = open(FILE_MODEL2, 'rb')
        model = pickle.load(file_model)
        file_model.close()
        return model
    X = [[0]]
    X_lens = [1]
    for domain in domain_list:
        ver = domain2ver(domain)
        np_ver = np.array(ver)
        #print len(np_ver)
        try:
            X = np.concatenate([X, np_ver])
        except ValueError:
            print domain
            print len(X), len(np_ver)
            print X
            print np_ver
            raise
        X_lens.append(len(np_ver))
    #remodel = hmm.GaussianHMM(n_components=N, covariance_type="spherical", n_iter=500)  # spherical, diag, full, tied
    remodel = hmm.GaussianHMM(n_components=N, covariance_type="full", n_iter=500)
    remodel.fit(X, X_lens)
    #joblib.dump(remodel, FILE_MODEL)
    file_model = open(FILE_MODEL2, 'wb')
    pickle.dump(remodel, file_model)
    file_model.close()
    return remodel


def test(remodel, domain_list):
    x = []
    y = []
    for domain in domain_list:
        domain_ver = domain2ver(domain)
        np_ver = np.array(domain_ver)
        pro = remodel.score(np_ver)
        print "SCORE:(%d) DOMAIN:(%s) " % (pro, domain)
        x.append(len(domain))
        y.append(pro)
    return x, y


if __name__ == '__main__':
    domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_cdn")
    domain_list2 = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_white_like")
    #remodel = train_hmm(domain_list)
    remodel = train_hmm(domain_list + domain_list2)
    #remodel = joblib.load(FILE_MODEL)
    x_1, y_1 = test(remodel, domain_list)
    print x_1
    print y_1
    #sys.exit(0)
    domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_black")
    x_2, y_2 = test(remodel, domain_list)
    print x_2
    print y_2
    domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_white_like")
    x_3, y_3 = test(remodel, domain_list)
    print x_3
    print y_3
    #%matplotlib inline
    fig, ax = plt.subplots()
    ax.set_xlabel('Domain Length')
    ax.set_ylabel('HMM Score')
    #ax.scatter(x_3, y_3, color='b', label="WHITE")
    ax.scatter(x_2, y_2, color='g', label="DNS tunnel")
    ax.scatter(x_1, y_1, color='r', label="CDN")
    ax.legend(loc='right')
    plt.show()

The X = [[0]] / X_lens = [1] bookkeeping above can also be written as follows, dropping the redundant dummy initialization:

def train_hmm(domain_list):
    if os.path.exists(FILE_MODEL2):
        print "found model file, use it..."
        file_model = open(FILE_MODEL2, 'rb')
        model = pickle.load(file_model)
        file_model.close()
        return model
    #X = [[0]]
    #X_lens = [1]
    X = []
    X_lens = []
    #print X
    for domain in domain_list:
        ver = domain2ver(domain)
        #np_ver = np.array(ver)
        try:
            #X = np.concatenate([X, np_ver])
            X = X + ver
        except ValueError:
            print domain
            print X
            print ver
            raise
        X_lens.append(len(ver))
    #remodel = hmm.GaussianHMM(n_components=N, covariance_type="spherical", n_iter=500)  # spherical, diag, full, tied
    remodel = hmm.GaussianHMM(n_components=N, covariance_type="full", n_iter=500)
    remodel.fit(X, X_lens)
    #joblib.dump(remodel, FILE_MODEL)
    file_model = open(FILE_MODEL2, 'wb')
    pickle.dump(remodel, file_model)
    file_model.close()
    return remodel
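
For reference, hmmlearn's fit expects all training sequences stacked into a single 2-D array of observations plus a lengths list marking where each sequence ends, which is why the np.concatenate version and the plain list-concatenation version build the same input. A small self-contained sketch with made-up domain strings (not the blog's DNS logs; diag covariance is used here only to keep the toy fit stable):

import numpy as np
from hmmlearn import hmm

domains = ["img.static.example", "cdn-node-01.example", "assets.cache.example"]
seqs = [[[ord(c)] for c in d] for d in domains]   # one ASCII code per character
X = np.concatenate(seqs)                          # all observations stacked row-wise
lengths = [len(s) for s in seqs]                  # sequence boundaries for fit()
model = hmm.GaussianHMM(n_components=2, covariance_type="diag", n_iter=20)
model.fit(X, lengths)
print(model.score(np.array(seqs[0])))             # per-sequence log-likelihood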
