Reproducing BiGAN
The dataset has 10,000 samples: the first 8,000 form the training set and the rest are used for testing. Each sample is a 1×144 vector (reshaped into a 12×12 matrix). The original BiGAN consists of an encoder, a discriminator, and a generator; here the fully connected layers inside them have been replaced with convolutional layers.
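Before the full script, a minimal sanity-check sketch of the assumed data layout (the file name GANData.csv and the 12×12 reshape come from the training code below; the [-1, 1] value range is an assumption flagged here, since the generator ends in a tanh and the sampling code rescales by 0.5*x + 0.5):

import numpy as np
from pandas import read_csv

values = read_csv('GANData.csv').values   # assumed shape: (10000, 144), one flattened sample per row
imgs = values.reshape(-1, 12, 12, 1)      # each 1x144 row becomes a 12x12 single-channel "image"
print(imgs.shape)                         # expected: (10000, 12, 12, 1)
# Assumption: the values should already lie in [-1, 1] to match the generator's
# tanh output; the script below does not rescale them itself.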
from __future__ import print_function, division

from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from pandas import read_csv
import pandas as pd
import numpy as np


class BIGAN():
    def __init__(self):
        self.img_rows = 12
        self.img_cols = 12
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=['binary_crossentropy'],
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # Build the encoder
        self.encoder = self.build_encoder()

        # The combined model only trains the generator and encoder,
        # so freeze the discriminator here
        self.discriminator.trainable = False

        # Generate image from sampled noise
        z = Input(shape=(self.latent_dim, ))
        img_ = self.generator(z)

        # Encode image
        img = Input(shape=self.img_shape)
        z_ = self.encoder(img)

        # Latent -> img is fake, and img -> latent is valid
        fake = self.discriminator([z, img_])
        valid = self.discriminator([z_, img])

        # Set up and compile the combined model:
        # it trains the generator and encoder to fool the discriminator
        self.bigan_generator = Model([z, img], [fake, valid])
        self.bigan_generator.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
                                     optimizer=optimizer)

    def build_encoder(self):
        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(self.latent_dim))

        model.summary()

        img = Input(shape=self.img_shape)
        z = model(img)

        return Model(img, z)

    def build_generator(self):
        model = Sequential()

        model.add(Dense(64 * 3 * 3, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((3, 3, 64)))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(32, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        z = Input(shape=(self.latent_dim,))
        gen_img = model(z)

        return Model(z, gen_img)

    def build_discriminator(self):
        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        d_in = concatenate([z, Flatten()(img)])

        model = Dense(14 * 14, activation="relu")(d_in)
        model = Reshape((14, 14, 1))(model)
        model = Conv2D(16, kernel_size=3, strides=2, padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(32, kernel_size=3, strides=2, padding="same")(model)
        model = ZeroPadding2D(padding=((0, 1), (0, 1)))(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(64, kernel_size=3, strides=2, padding="same")(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Conv2D(128, kernel_size=3, strides=1, padding="same")(model)
        model = BatchNormalization(momentum=0.8)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.25)(model)
        model = Flatten()(model)
        validity = Dense(1, activation="sigmoid")(model)

        return Model([z, img], validity)

    def train(self, epochs, batch_size=128, sample_interval=50):
        # Load the dataset
        dataset = read_csv('GANData.csv')
        values = dataset.values
        XY = values

        # First 8000 samples for training, the rest for testing.
        # (The original slice started the test set at index 8001,
        # silently dropping sample 8000.)
        n_train_hours1 = 8000
        x_train = XY[:n_train_hours1, :]
        x_test = XY[n_train_hours1:, :]
        X_train = x_train.reshape(-1, 12, 12, 1)
        X_test = x_test.reshape(-1, 12, 12, 1)  # held out; not used during training

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        # Per-epoch history: [d_loss, d_acc, g_loss]
        FHZ = np.zeros((epochs, 3))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Sample noise and generate imgs
            z = np.random.normal(size=(batch_size, self.latent_dim))
            imgs_ = self.generator.predict(z)

            # Select a random batch of images and encode
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]
            z_ = self.encoder.predict(imgs)

            # Train the discriminator (img -> z is valid, z -> img is fake)
            d_loss_real = self.discriminator.train_on_batch([z_, imgs], valid)
            d_loss_fake = self.discriminator.train_on_batch([z, imgs_], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (z -> img is valid and img -> z is invalid)
            g_loss = self.bigan_generator.train_on_batch([z, imgs], [valid, fake])

            # Print the progress
            print("%d [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss[0]))
            FHZ[epoch, 0] = d_loss[0]
            FHZ[epoch, 1] = d_loss[1]
            FHZ[epoch, 2] = g_loss[0]

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_interval(epoch)

        return FHZ
    def sample_interval(self, epoch):
        r, c = 5, 5
        z = np.random.normal(size=(r * c, self.latent_dim))
        gen_imgs = self.generator.predict(z)

        # Rescale images from [-1, 1] to [0, 1]
        gen_imgs = 0.5 * gen_imgs + 0.5

        # Flatten each generated 12x12 image back to one row
        decoded_imgs = gen_imgs.reshape((gen_imgs.shape[0], -1))
        print('decoded_imgs.shape:', decoded_imgs.shape)

        # Write the generated samples to an Excel sheet
        # (each call overwrites Result.xlsx)
        data_df = pd.DataFrame(decoded_imgs)
        writer = pd.ExcelWriter('Result.xlsx')
        data_df.to_excel(writer, 'page_1', float_format='%.5f')  # float_format controls the precision
        writer.save()
        # fig.savefig("images/mnist_%d.png" % epoch)


if __name__ == '__main__':
    bigan = BIGAN()
    d_loss = bigan.train(epochs=10, batch_size=32, sample_interval=9)
    np.savetxt("d_lossnum.csv", d_loss, delimiter=',')
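Two optional follow-ups. First, a small sketch for plotting the recorded training curves; the column order (d_loss, d_acc, g_loss) follows how FHZ is filled in train():

import numpy as np
import matplotlib.pyplot as plt

hist = np.loadtxt("d_lossnum.csv", delimiter=',')  # columns: d_loss, d_acc, g_loss
plt.plot(hist[:, 0], label='D loss')
plt.plot(hist[:, 2], label='G loss')
plt.xlabel('epoch')
plt.legend()
plt.show()

Second, the script prepares X_test but never uses it. A common BiGAN check on a held-out set is to compare each sample x against its reconstruction G(E(x)); the following is a hedged sketch of that idea, not part of the original code, assuming `bigan` was trained as above:

values = read_csv('GANData.csv').values
X_test = values[8000:].reshape(-1, 12, 12, 1)  # the held-out samples

z_test = bigan.encoder.predict(X_test)         # E(x)
recon = bigan.generator.predict(z_test)        # G(E(x))
recon_err = np.mean((recon - X_test) ** 2, axis=(1, 2, 3))
print(recon_err.shape)                         # one reconstruction error per test sample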