mxnet 线性模型
mxnet 线性模型
li {list-style-type:decimal;}ol.wiz-list-level2 > li {list-style-type:lower-latin;}ol.wiz-list-level3 > li {list-style-type:lower-roman;}blockquote {padding:0 12px;padding:0 0.75rem;}blockquote > :first-child {margin-top:0;}blockquote > :last-child {margin-bottom:0;}img {border:0;max-width:100%;height:auto !important;margin:2px 0;}table {border-collapse:collapse;border:1px solid #bbbbbb;}td, th {padding:4px 8px;border-collapse:collapse;border:1px solid #bbbbbb;min-height:28px;word-break:break-all;box-sizing: border-box;}.wiz-hide {display:none !important;}
-->
span::selection, .CodeMirror-line > span > span::selection { background: #d7d4f0; }.CodeMirror-line::-moz-selection, .CodeMirror-line > span::-moz-selection, .CodeMirror-line > span > span::-moz-selection { background: #d7d4f0; }.cm-searching {background: #ffa; background: rgba(255, 255, 0, .4);}.cm-force-border { padding-right: .1px; }@media print { .CodeMirror div.CodeMirror-cursors {visibility: hidden;}}.cm-tab-wrap-hack:after { content: ""; }span.CodeMirror-selectedtext { background: none; }.CodeMirror-activeline-background, .CodeMirror-selected {transition: visibility 0ms 100ms;}.CodeMirror-blur .CodeMirror-activeline-background, .CodeMirror-blur .CodeMirror-selected {visibility:hidden;}.CodeMirror-blur .CodeMirror-matchingbracket {color:inherit !important;outline:none !important;text-decoration:none !important;}
-->
span::selection, .cm-s-tomorrow-night-eighties .CodeMirror-line > span > span::selection { background: rgba(45, 45, 45, 0.99); }.cm-s-tomorrow-night-eighties .CodeMirror-line::-moz-selection, .cm-s-tomorrow-night-eighties .CodeMirror-line > span::-moz-selection, .cm-s-tomorrow-night-eighties .CodeMirror-line > span > span::-moz-selection { background: rgba(45, 45, 45, 0.99); }.cm-s-tomorrow-night-eighties .CodeMirror-gutters { background: #000000; border-right: 0px; }.cm-s-tomorrow-night-eighties .CodeMirror-guttermarker { color: #f2777a; }.cm-s-tomorrow-night-eighties .CodeMirror-guttermarker-subtle { color: #777; }.cm-s-tomorrow-night-eighties .CodeMirror-linenumber { color: #515151; }.cm-s-tomorrow-night-eighties .CodeMirror-cursor { border-left: 1px solid #6A6A6A; }.cm-s-tomorrow-night-eighties span.cm-comment { color: #d27b53; }.cm-s-tomorrow-night-eighties span.cm-atom { color: #a16a94; }.cm-s-tomorrow-night-eighties span.cm-number { color: #a16a94; }.cm-s-tomorrow-night-eighties span.cm-property, .cm-s-tomorrow-night-eighties span.cm-attribute { color: #99cc99; }.cm-s-tomorrow-night-eighties span.cm-keyword { color: #f2777a; }.cm-s-tomorrow-night-eighties span.cm-string { color: #ffcc66; }.cm-s-tomorrow-night-eighties span.cm-variable { color: #99cc99; }.cm-s-tomorrow-night-eighties span.cm-variable-2 { color: #6699cc; }.cm-s-tomorrow-night-eighties span.cm-def { color: #f99157; }.cm-s-tomorrow-night-eighties span.cm-bracket { color: #CCCCCC; }.cm-s-tomorrow-night-eighties span.cm-tag { color: #f2777a; }.cm-s-tomorrow-night-eighties span.cm-link { color: #a16a94; }.cm-s-tomorrow-night-eighties span.cm-error { background: #f2777a; color: #6A6A6A; }.cm-s-tomorrow-night-eighties .CodeMirror-activeline-background { background: #343600; }.cm-s-tomorrow-night-eighties .CodeMirror-matchingbracket { text-decoration: underline; color: white !important; }
-->
import mxnet.ndarray as nd
from mxnet import gluon
from mxnet import autograd
# create data
def set_data(true_w, true_b, num_examples, *args, **kwargs):
    """Generate a synthetic linear-regression dataset.

    Draws X ~ N(0, 1) with shape (num_examples, len(true_w)), then builds
    y = X @ true_w + true_b + 0.1 * gaussian noise.

    Returns:
        (X, y): feature NDArray and target NDArray.
    """
    feature_count = len(true_w)
    X = nd.random_normal(shape=(num_examples, feature_count))
    # Accumulate the linear combination column by column.
    y = 0
    for col, weight in enumerate(true_w):
        y = y + weight * X[:, col]
    y = y + true_b
    # Additive gaussian observation noise (fixed 0.1 scale).
    y = y + 0.1 * nd.random_normal(shape=y.shape)
    return X, y
# create data loader
def data_loader(batch_size, X, y, shuffle=False):
    """Wrap (X, y) in an ArrayDataset and return a batching DataLoader.

    Args:
        batch_size: number of samples per mini-batch.
        X, y: feature and label NDArrays of matching first dimension.
        shuffle: whether to reshuffle samples each epoch.
    """
    return gluon.data.DataLoader(
        dataset=gluon.data.ArrayDataset(X, y),
        batch_size=batch_size,
        shuffle=shuffle,
    )
# create net
def set_net(node_num):
    """Build and initialize a single-Dense-layer Sequential network.

    Args:
        node_num: number of output units of the Dense layer.
    """
    model = gluon.nn.Sequential()
    model.add(gluon.nn.Dense(node_num))
    # Default initializer; parameters are materialized lazily on first forward.
    model.initialize()
    return model
# create trainer
def trainer(net, loss_method, learning_rate):
    """Create a gluon.Trainer over all parameters of *net*.

    NOTE(review): despite its name, ``loss_method`` is passed to
    gluon.Trainer as the *optimizer* name (e.g. 'sgd'), not a loss.
    The name is kept because callers use it as a keyword argument.
    """
    return gluon.Trainer(
        net.collect_params(),
        loss_method,
        {'learning_rate': learning_rate},
    )
# Module-level L2 (squared-error) loss, passed to start_train() as loss_method.
square_loss = gluon.loss.L2Loss()
# start train
def start_train(epochs, batch_size, data_iter, net, loss_method, tariner, num_examples):
    """Run the training loop and return the learned weight and bias.

    Args:
        epochs: number of passes over the dataset.
        batch_size: mini-batch size (passed to the optimizer step).
        data_iter: iterable yielding (data, label) mini-batches.
        net: the gluon network to train.
        loss_method: loss callable, e.g. gluon.loss.L2Loss().
        tariner: the gluon.Trainer to step ('tariner' is a historical typo
            for 'trainer'; the name is kept for keyword-caller compatibility).
        num_examples: total sample count, used to average the epoch loss.

    Returns:
        (weight, bias): NDArrays of the first Dense layer's parameters.
    """
    for e in range(epochs):
        total_loss = 0
        for data, label in data_iter:
            with autograd.record():
                output = net(data)
                loss = loss_method(output, label)
            loss.backward()
            # BUG FIX: step the trainer that was passed in; the original
            # ignored the parameter and silently used the global 'trainer'.
            tariner.step(batch_size)
            total_loss += nd.sum(loss).asscalar()
        # BUG FIX: average over num_examples instead of hard-coded 1000.
        print("第 %d次训练, 平均损失: %f" % (e, total_loss / num_examples))
    dense = net[0]
    print(dense.weight.data())
    print(dense.bias.data())
    return dense.weight.data(), dense.bias.data()
# ---- driver: fit y = X @ [5, 8, 6] + 6 on 1000 synthetic samples ----
true_w = [5, 8, 6]
true_b = 6

X, y = set_data(true_w=true_w, true_b=true_b, num_examples=1000)
data_iter = data_loader(batch_size=10, X=X, y=y, shuffle=True)
net = set_net(1)
# NOTE: this rebinds the module name 'trainer' from the factory function
# to the gluon.Trainer instance it returns.
trainer = trainer(net=net, loss_method='sgd', learning_rate=0.1)
start_train(epochs=5, batch_size=10, data_iter=data_iter, net=net,
            loss_method=square_loss, tariner=trainer, num_examples=1000)
<wiz_code_mirror>
# Reconstructed from a garbled duplicate paste of the script above
# (a stray, truncated 'def data_loader(...)' line preceded the imports).
import mxnet
import mxnet.ndarray as nd
from mxnet import gluon
from mxnet import autograd


# create data
def set_data(true_w, true_b, num_examples, *args, **kwargs):
    """Generate X ~ N(0,1) and y = X @ true_w + true_b + 0.1 * noise."""
    num_inputs = len(true_w)
    X = nd.random_normal(shape=(num_examples, num_inputs))
    y = 0
    for num in range(num_inputs):
        y += true_w[num] * X[:, num]
    y += true_b
    y += 0.1 * nd.random_normal(shape=y.shape)
    return X, y


# create data loader
def data_loader(batch_size, X, y, shuffle=False):
    """Wrap (X, y) in an ArrayDataset and return a batching DataLoader."""
    data_set = gluon.data.ArrayDataset(X, y)
    data_iter = gluon.data.DataLoader(dataset=data_set, batch_size=batch_size, shuffle=shuffle)
    return data_iter


# create net
def set_net(node_num):
    """Build and initialize a single-Dense-layer Sequential network."""
    net = gluon.nn.Sequential()
    net.add(gluon.nn.Dense(node_num))
    net.initialize()
    return net


# create trainer
def trainer(net, loss_method, learning_rate):
    """Create a gluon.Trainer; 'loss_method' actually carries the optimizer name."""
    trainer = gluon.Trainer(
        net.collect_params(), loss_method, {'learning_rate': learning_rate}
    )
    return trainer


square_loss = gluon.loss.L2Loss()


# start train
def start_train(epochs, batch_size, data_iter, net, loss_method, tariner, num_examples):
    """Train for *epochs* passes; return the Dense layer's (weight, bias)."""
    for e in range(epochs):
        total_loss = 0
        for data, label in data_iter:
            with autograd.record():
                output = net(data)
                loss = loss_method(output, label)
            loss.backward()
            # BUG FIX: use the passed-in trainer, not the global one.
            tariner.step(batch_size)
            total_loss += nd.sum(loss).asscalar()
        # BUG FIX: average over num_examples instead of hard-coded 1000.
        print("第 %d次训练, 平均损失: %f" % (e, total_loss / num_examples))
    dense = net[0]
    print(dense.weight.data())
    print(dense.bias.data())
    return dense.weight.data(), dense.bias.data()


true_w = [5, 8, 6]
true_b = 6
X, y = set_data(true_w=true_w, true_b=true_b, num_examples=1000)
data_iter = data_loader(batch_size=10, X=X, y=y, shuffle=True)
net = set_net(1)
trainer = trainer(net=net, loss_method='sgd', learning_rate=0.1)
start_train(epochs=5, batch_size=10, data_iter=data_iter, net=net, loss_method=square_loss, tariner=trainer,
            num_examples=1000)
mxnet 线性模型的更多相关文章
- MXNET:监督学习
线性回归 给定一个数据点集合 X 和对应的目标值 y,线性模型的目标就是找到一条使用向量 w 和位移 b 描述的线,来尽可能地近似每个样本X[i] 和 y[i]. 数学公式表示为\(\hat{y}=X ...
- 分布式机器学习框架:MxNet 前言
原文连接:MxNet和Caffe之间有什么优缺点一.前言: Minerva: 高效灵活的并行深度学习引擎 不同于cxxnet追求极致速度和易用性,Minerva则提供了一个高效灵活的平台 ...
- 广义线性模型(Generalized Linear Models)
前面的文章已经介绍了一个回归和一个分类的例子.在逻辑回归模型中我们假设: 在分类问题中我们假设: 他们都是广义线性模型中的一个例子,在理解广义线性模型之前需要先理解指数分布族. 指数分布族(The E ...
- ubantu16.04+mxnet +opencv+cuda8.0 环境搭建
ubantu16.04+mxnet +opencv+cuda8.0 环境搭建 建议:环境搭建完成之后,不要更新系统(内核) 转载请注明出处: 微微苏荷 一 我的安装环境 系统:ubuntu16.04 ...
- MXNet设计和实现简介
原文:https://github.com/dmlc/mxnet/issues/797 神经网络本质上是一种语言,我们通过它来表达对应用问题的理解.例如我们用卷积层来表达空间相关性,RNN来表达时间连 ...
- MXNET手写体识别的例子
安装完MXNet之后,运行了官网的手写体识别的例子,这个相当于深度学习的Hello world了吧.. http://mxnet.io/tutorials/python/mnist.html 运行的过 ...
- MXNET安装过程中遇到libinfo导入不了的问题解决
今天尝试安装windows版本的MXNET,在按照官网的运行了python的setup之后,import mxnet时出现如下错误:cannot import name libinfo,在网上查找发现 ...
- MXNet学习~试用卷积~跑CIFAR-10
第一次用卷积,看的别人的模型跑的CIFAR-10,不过吐槽一下...我觉着我的965m加速之后比我的cpu算起来没快多少..正确率64%的样子,没达到模型里说的75%,不知道问题出在哪里 import ...
- MXNet学习~第一个例子~跑MNIST
反正基本上是给自己看的,直接贴写过注释后的代码,可能有的地方理解不对,你多担待,看到了也提出来(基本上对未来的自己说的),三层跑到了97%,毕竟是第一个例子,主要就是用来理解MXNet怎么使用. #导 ...
随机推荐
- Oracle记录(二) SQLPlus命令
对于Oracle数据库操作主要使用的是命令行方式,而所有的命令都使用sqlplus完成,对于sqlplus有两种形式.就我个人而言,还是比较喜欢UNIX与Linux下的Oracle.呵呵 一种是dos ...
- Difference between boot ip. service ip and persistent ip in hacmp
- boot IP is the original address on a network interface even when the cluster is down - service IP ...
- 全文检索引擎Solr系列——Solr核心概念、配置文件
Document Document是Solr索引(动词,indexing)和搜索的最基本单元,它类似于关系数据库表中的一条记录,可以包含一个或多个字段(Field),每个字段包含一个name和文本值. ...
- Centos7部署CephFS
标签(空格分隔): ceph环境,ceph,cephfs cephfs部署之前准备工作: 1. 一个 clean+active 的cluster cluster部署参考:centos7下搭建ceph ...
- keepalived 预防脑裂检测脚本
1 检查vip [root@mysql2 keepalived]# cat /etc/keepalived/check_brain_keepalived.sh #!/bin/bash # 检查脑裂的脚 ...
- Py修行路 python基础 (十一)迭代器 与 生成器
一.什么是迭代? 迭代通俗的讲就是一个遍历重复的过程. 维基百科中 迭代(Iteration) 的一个通用概念是:重复某个过程的行为,这个过程中的每次重复称为一次迭代.具体对应到Python编程中就是 ...
- CSS 透明
filter:alpha(opacity=60);-moz-opacity:0.5;opacity: 0.5;
- 13-js的面向对象
创建对象的几种常用的方式 1 . 使用Object或对象字面量创建对象 2 . 工厂模式创建对象 3 . 构造函数模式创建对象 4 . 原型模式创建对象 1 . 使用Object或对象字面量创建对象 ...
- 自定义inputformat和outputformat
1. 自定义inputFormat 1.1 需求 无论hdfs还是mapreduce,对于小文件都有损效率,实践中,又难免面临处理大量小文件的场景,此时,就需要有相应解决方案 1.2 分析 小文件的优 ...
- errant-transactions
https://www.percona.com/blog/2015/12/02/gtid-failover-with-mysqlslavetrx-fix-errant-transactions/ 使用 ...