# _*_coding:UTF-8_*_

import operator
import os
import pickle
import random
from math import log

import tflearn
import tldextract
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_1d, max_pool_1d
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import batch_normalization
from sklearn.model_selection import train_test_split


def get_cnn_model(max_len, volcab_size=None):
    """Build a 1-D CNN for binary (DGA vs. benign) domain classification.

    Args:
        max_len: fixed length every input character-id sequence is padded to.
        volcab_size: character-vocabulary size for the embedding layer;
            defaults to a generous upper bound when not supplied.

    Returns:
        An untrained tflearn.DNN wrapping the compiled network.
    """
    if volcab_size is None:
        volcab_size = 10240000  # fallback upper bound when the real vocabulary size is unknown
    # Character embedding -> two conv/pool stages -> batch-norm -> dense -> softmax head.
    network = tflearn.input_data(shape=[None, max_len], name='input')
    network = tflearn.embedding(network, input_dim=volcab_size, output_dim=32)
    network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = batch_normalization(network)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    network = regression(network, optimizer=sgd, loss='categorical_crossentropy')
    return tflearn.DNN(network, tensorboard_verbose=0)


def get_data_from(file_name):
    """Return the stripped lines of *file_name* as a list of strings.

    No CSV parsing is done: for top-1m.csv each entry is the raw
    "rank,domain" record — TODO confirm whether the domain column
    should be extracted instead.
    """
    with open(file_name) as f:
        return [line.strip() for line in f]


def get_local_data(tag="labeled"):
    """Load the two domain lists used for training.

    Returns:
        (black_data, white_data): DGA (label 1) and benign (label 0) domains.
    """
    # BUG FIX: the original assigned the DGA feed to white_data and the
    # Alexa top-1m list to black_data, inverting the labels produced by
    # get_data() (black == 1 == DGA, which is what test_model() assumes
    # when it scores the DGA feed against class 1).
    black_data = get_data_from(file_name="dga_360_sorted.txt")
    white_data = get_data_from(file_name="top-1m.csv")
    return black_data, white_data


def get_data():
    """Build padded integer sequences X and one-hot labels Y for training.

    Side effect: pickles the character vocabulary, max_len and volcab_size
    to "volcab.pkl" so test_model() can reproduce the exact same encoding.

    Returns:
        (X, Y, maxlen, max_features)
    """
    black_x, white_x = get_local_data()
    black_y, white_y = [1] * len(black_x), [0] * len(white_x)
    X = black_x + white_x
    labels = black_y + white_y
    # Map every character seen in the corpus to a positive id (0 is reserved
    # for padding).  NOTE(review): iterating a set makes the mapping differ
    # between runs; it is persisted below so train and test stay consistent.
    valid_chars = {x: idx + 1 for idx, x in enumerate(set(''.join(X)))}
    max_features = len(valid_chars) + 1  # +1 for the padding id 0
    print("max_features:", max_features)
    maxlen = max(len(x) for x in X)
    print("max_len:", maxlen)
    maxlen = min(maxlen, 256)  # cap sequence length to bound model size
    # Convert characters to ids and pad with 0 to a fixed length.
    X = [[valid_chars[y] for y in x] for x in X]
    X = pad_sequences(X, maxlen=maxlen, value=0.)
    # One-hot encode the 0/1 labels.
    Y = to_categorical(labels, nb_classes=2)
    # Persist the encoding parameters for test_model().
    # (FIX: the original left the pickle file handle unclosed on error.)
    data = {"valid_chars": valid_chars, "max_len": maxlen, "volcab_size": max_features}
    with open("volcab.pkl", 'wb') as output:
        pickle.dump(data, output)
    return X, Y, maxlen, max_features


def train_model():
    """Train the CNN on an 80/20 split, save it, and sanity-check predictions."""
    X, Y, max_len, volcab_size = get_data()
    print("X len:", len(X), "Y len:", len(Y))
    trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.2, random_state=42)
    print(trainX[:1])
    print(trainY[:1])
    print(testX[-1:])
    print(testY[-1:])
    model = get_cnn_model(max_len, volcab_size)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True, batch_size=1024)
    filename = 'finalized_model.tflearn'
    model.save(filename)
    # Reload immediately to verify the checkpoint round-trips.
    model.load(filename)
    print("Just review 3 sample data test result:")
    result = model.predict(testX[0:3])
    print(result)


def test_model():
    """Reload the saved model + vocabulary and score the DGA feed in batches."""
    volcab_file = "volcab.pkl"
    # FIX: was a bare `assert`, which is stripped under `python -O`.
    if not os.path.exists(volcab_file):
        raise FileNotFoundError(volcab_file + " not found; run train_model() first")
    with open(volcab_file, 'rb') as pkl_file:
        data = pickle.load(pkl_file)
    valid_chars = data["valid_chars"]
    max_document_length = data["max_len"]
    max_features = data["volcab_size"]
    print("max_features:", max_features)
    print("max_len:", max_document_length)
    cnn_model = get_cnn_model(max_document_length, max_features)
    cnn_model.load('finalized_model.tflearn')
    print("predict domains:")
    bls = []
    with open("dga_360_sorted.txt") as f:
        # with open("todo.txt") as f:
        lines = f.readlines()
    print("domain_list len:", len(lines))
    batch = 1000  # predict in fixed-size chunks to bound memory use
    for start in range(0, len(lines), batch):
        chunk = lines[start:start + batch]
        domain_list = [line.strip() for line in chunk]
        # Encode with the training vocabulary; characters unseen during
        # training map to 0 (the padding id).
        X = [[valid_chars.get(ch, 0) for ch in d] for d in domain_list]
        X = pad_sequences(X, maxlen=max_document_length, value=0.)
        result = cnn_model.predict(X)
        # FIX: inner index renamed from `i` — it shadowed the outer loop variable.
        for j, domain in enumerate(domain_list):
            if result[j][1] > .5:  # class 1 == DGA; raise toward .95 for precision
                print(chunk[j].strip() + "\t" + domain, result[j][1])
                bls.append(domain)
    print(len(bls), "dga found!")


if __name__ == "__main__":
    print("train model...")
    train_model()
    print("test model...")
    test_model()

dga model train and test code的更多相关文章

  1. 一步步开发自己的博客 .NET版(9、从model first替换成code first 问题记录)

    为什么要改用code first 用过code first的基本上都不会再想用回model first或是db first(谁用谁知道).不要问我为什么不一开始就直接使用code first,因为那个 ...

  2. Pytorch本人疑问(2)model.train()和model.eval()的区别

    我们在训练时如果使用了BN层和Dropout层,我们需要对model进行标识: model.train():在训练时使用BN层和Dropout层,对模型进行更改. model.eval():在评价时将 ...

  3. MVC学习6 学习使用Code First Migrations功能 把Model的更新同步到DB中

     参考:http://www.asp.net/mvc/tutorials/mvc-4/getting-started-with-aspnet-mvc4/adding-a-new-field-to-th ...

  4. EF7 - What Does “Code First Only” Really Mean

    这篇文章很有价值,但翻译了一段,实在翻译不下去了,没办法,只能转载了. 英文地址:http://blogs.msdn.com/b/adonet/archive/2014/10/21/ef7-what- ...

  5. Code First :使用Entity. Framework编程(8) ----转发 收藏

    第8章 Code First将走向哪里? So far, this book has covered all of the Code First components that reached the ...

  6. Code First :使用Entity. Framework编程(7) ----转发 收藏

    第7章 高级概念 The Code First modeling functionality that you have seen so far should be enough to get you ...

  7. Create Entity Data Model

    http://www.entityframeworktutorial.net/EntityFramework5/create-dbcontext-in-entity-framework5.aspx 官 ...

  8. Clean Code – Chapter 6 Objects and Data Structures

    Data Abstraction Hiding implementation Data/Object Anti-Symmetry Objects hide their data behind abst ...

  9. CV code references

    转:http://www.sigvc.org/bbs/thread-72-1-1.html 一.特征提取Feature Extraction:   SIFT [1] [Demo program][SI ...

随机推荐

  1. css 文本外观属性(text) 和 字体样式属性(font)

    css文本 text外观属性 color: 颜色值(red,blue)十六进制 ,rgb letter-spacing: 字间距 px,em word-spacing: 单词间距 对中文无效 line ...

  2. 201871010113-刘兴瑞《面向对象程序设计(java)》第六-七周学习总结

    项目 内容 这个作业属于哪个课程 <任课教师博客主页链接> https://www.cnblogs.com/nwnu-daizh/ 这个作业的要求在哪里 <作业链接地址>htt ...

  3. SpringBoot application.properties配置参数详情

    multipart multipart.enabled 开启上传支持(默认:true) multipart.file-size-threshold: 大于该值的文件会被写到磁盘上 multipart. ...

  4. 题解:swj社会摇基础第一课

    题目链接 思路:dp,f[i]表示构成i所需要的最小步数 //swj么么哒 #include<bits/stdc++.h> using namespace std; int n; cons ...

  5. luoguP4770 [NOI2018]你的名字

    题意 不妨先考虑\(l=1,r=|S|\)的情况: 这时我们要求的其实是\(S,T\)的本质不同的公共子串数量. 首先对\(S\)建一个后缀自动机,同时对于每个\(T\),我们也建一个自动机. 根据后 ...

  6. Python IO 模式

    IO 模式 对于 Linux 的 network IO: 一次 IO 访问(以read举例),数据会先被拷贝到操作系统内核的缓冲区中,然后才会从操作系统内核的缓冲区 copy 到应用程序的地址空间.所 ...

  7. hdu6494 dp

    http://acm.hdu.edu.cn/showproblem.php?pid=6494 题意 一个长n字符串(n,1e4),'A'代表A得分,'B'代表B得分,'?'代表不确定,一局比赛先得11 ...

  8. vue 路由跳转到本页面,ts 监听路由变化

    @Watch('$route') routechange(to: any, from: any) { //参数不相等 if (to.query.name!=from.query.name) { //t ...

  9. 物联网架构成长之路(37)-基于C#开发串口工具

    0. 前言 作为物联网平台开发,最基础的工具链还是要有的.前几篇博客,介绍了用C#开发一个MQTT的客户端,用于模拟设备连接平台,并发送数据到平台.但是对于一些硬件来说,可能会用到串口uart来发送数 ...

  10. POJ 3041 Asteroids(二分图最大匹配)

    ###题目链接### 题目大意: 给你 N 和 K ,在一个 N * N 个图上有 K 个 小行星.有一个可以横着切或竖着切的武器,问最少切多少次,所有行星都会被毁灭. 分析: 将 1~n 行数加入左 ...