MXNet linear model
The complete script below generates synthetic data from a known linear function (plus Gaussian noise), wraps it in a Gluon DataLoader, fits a single Dense layer with SGD against an L2 loss, and prints the learned weight and bias, which should end up close to true_w and true_b.

import mxnet.ndarray as nd
from mxnet import gluon
from mxnet import autograd


# create data: y = X . true_w + true_b + noise
def set_data(true_w, true_b, num_examples, *args, **kwargs):
    num_inputs = len(true_w)
    X = nd.random_normal(shape=(num_examples, num_inputs))
    y = 0
    for num in range(num_inputs):
        y += true_w[num] * X[:, num]
    y += true_b
    y += 0.1 * nd.random_normal(shape=y.shape)  # Gaussian noise
    return X, y


# create data loader
def data_loader(batch_size, X, y, shuffle=False):
    data_set = gluon.data.ArrayDataset(X, y)
    data_iter = gluon.data.DataLoader(dataset=data_set, batch_size=batch_size, shuffle=shuffle)
    return data_iter


# create net: a single Dense layer is exactly a linear model
def set_net(node_num):
    net = gluon.nn.Sequential()
    net.add(gluon.nn.Dense(node_num))
    net.initialize()
    return net


# create trainer ('sgd' is the optimizer name, not a loss)
def set_trainer(net, optimizer, learning_rate):
    trainer = gluon.Trainer(
        net.collect_params(), optimizer, {'learning_rate': learning_rate}
    )
    return trainer


square_loss = gluon.loss.L2Loss()


# start training
def start_train(epochs, batch_size, data_iter, net, loss_method, trainer, num_examples):
    for e in range(epochs):
        total_loss = 0
        for data, label in data_iter:
            with autograd.record():
                output = net(data)
                loss = loss_method(output, label)
            loss.backward()
            trainer.step(batch_size)
            total_loss += nd.sum(loss).asscalar()
        print("Epoch %d, average loss: %f" % (e, total_loss / num_examples))
    dense = net[0]
    print(dense.weight.data())
    print(dense.bias.data())
    return dense.weight.data(), dense.bias.data()


true_w = [5, 8, 6]
true_b = 6
X, y = set_data(true_w=true_w, true_b=true_b, num_examples=1000)
data_iter = data_loader(batch_size=10, X=X, y=y, shuffle=True)
net = set_net(1)
trainer = set_trainer(net=net, optimizer='sgd', learning_rate=0.1)
start_train(epochs=5, batch_size=10, data_iter=data_iter, net=net, loss_method=square_loss, trainer=trainer,
            num_examples=1000)
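
As a quick sanity check, the trained layer can be compared with the ground-truth linear function on a few fresh inputs. A minimal sketch, reusing names from the script above (new_X is a throwaway variable introduced here just for illustration):

# compare predictions of the trained net with the true linear function
new_X = nd.random_normal(shape=(5, len(true_w)))
predicted = net(new_X)                                            # shape (5, 1)
expected = nd.dot(new_X, nd.array(true_w).reshape((-1, 1))) + true_b
print(predicted)
print(expected)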
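
A note on trainer.step(batch_size): gluon's L2Loss returns ½(ŷ − y)² per sample, loss.backward() sums the per-sample gradients, and step(batch_size) rescales that sum by 1/batch_size before applying the SGD update. For plain NDArray parameters created with attach_grad(), the update roughly amounts to the sketch below (an illustrative approximation for intuition, not gluon's actual implementation):

# illustrative manual equivalent of trainer.step(batch_size) with the 'sgd' optimizer
def manual_sgd_step(params, learning_rate, batch_size):
    for param in params:
        # average the summed per-sample gradient over the batch, then step
        param[:] = param - learning_rate * param.grad / batch_size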