首先是Net使用的小例子:

#include <vector>
#include <iostream>
#include <caffe/net.hpp>
using namespace std;
using namespace caffe;

// Minimal Net usage example: load a network definition in TEST phase and
// print the name of every Blob the Net contains.
int main()
{
    // Path to the deploy prototxt describing the network structure.
    std::string proto("./bambootry/deploy.prototxt");
    Net<float> nn(proto, caffe::TEST);
    // blob_names() returns the names of all Blob objects in the Net.
    vector<string> bn = nn.blob_names();
    // size_t avoids the signed/unsigned comparison of the original
    // (whose "int i=" had also lost its 0 initializer).
    for (size_t i = 0; i < bn.size(); i++)
    {
        cout << "Blob #" << i << " : " << bn[i] << endl;
    }
    return 0;  // the "0" literal was lost in the original paste
}

linux下编译(bambootry为自己创建的文件夹)

g++ -o ./bambootry/netapp ./bambootry/net.cpp -I ./include -D CPU_ONLY \
-I ./.build_release/src/ -L ./build/lib -lcaffe -lglog -lboost_system \
-lprotobuf

结果:

……中间省略

继续省略……

代码注释

src/caffe/proto/caffe.proto中NetParameter

 message NetParameter {
  optional string name = 1;  // consider giving the network a name
  // DEPRECATED. See InputParameter. The input blobs to the network.
  // There may be multiple input blobs.
  repeated string input = 3;
  // DEPRECATED. See InputParameter. The shape of the input blobs.
  repeated BlobShape input_shape = 8;
  // 4D input dimensions -- deprecated. Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4;  // legacy dimension information
  // Whether the network will force every layer to carry out backward operation.
  // If set False, then whether to carry out backward is determined
  // automatically according to the net structure and learning rates.
  optional bool force_backward = 5 [default = false];
  // The current "state" of the network, including the phase, level, and stage.
  // Some layers may be included/excluded depending on this state and the states
  // specified in the layers' include and exclude fields.
  optional NetState state = 6;
  // Print debugging information about results while running Net::Forward,
  // Net::Backward, and Net::Update.
  optional bool debug_info = 7 [default = false];
  // The layers that make up the net. Each of their configurations, including
  // connectivity and behavior, is specified as a LayerParameter.
  repeated LayerParameter layer = 100;  // ID 100 so layers are printed last.
  // DEPRECATED: use 'layer' instead.
  repeated V1LayerParameter layers = 2;
}

include/caffe/net.hpp

 #ifndef CAFFE_NET_HPP_
#define CAFFE_NET_HPP_

#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Connects Layer%s together into a directed acyclic graph (DAG)
 *        specified by a NetParameter.
 *
 * TODO(dox): more thorough description.
 */
template <typename Dtype>
class Net {
 public:
  explicit Net(const NetParameter& param);
  // NOTE(review): the default for `level` was lost in the paste; restored
  // as 0 to match upstream caffe.
  explicit Net(const string& param_file, Phase phase,
      const int level = 0, const vector<string>* stages = NULL);
  virtual ~Net() {}

  /// @brief Initialize a network with a NetParameter.
  void Init(const NetParameter& param);

  /**
   * @brief Run Forward and return the result.
   */
  const vector<Blob<Dtype>*>& Forward(Dtype* loss = NULL);
  /// @brief DEPRECATED; use Forward() instead.
  const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL) {
    // Rate-limited warning (every 1000 calls); the count literal was lost
    // in the paste and is restored per upstream caffe.
    LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: ForwardPrefilled() "
        << "will be removed in a future version. Use Forward().";
    return Forward(loss);
  }

  /**
   * The From and To variants of Forward and Backward operate on the
   * (topological) ordering by which the net is specified. For general DAG
   * networks, note that (1) computing from one layer to another might entail
   * extra computation on unrelated branches, and (2) computation starting in
   * the middle may be incorrect if all of the layers of a fan-in are not
   * included.
   */
  Dtype ForwardFromTo(int start, int end);
  Dtype ForwardFrom(int start);
  Dtype ForwardTo(int end);
  /// @brief DEPRECATED; set input blobs then use Forward() instead.
  const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
      Dtype* loss = NULL);

  /**
   * @brief Zeroes out the diffs of all net parameters.
   *        Should be run before Backward.
   */
  void ClearParamDiffs();

  /**
   * The network backward should take no input and output, since it solely
   * computes the gradient w.r.t the parameters, and the data has already been
   * provided during the forward pass.
   */
  void Backward();
  void BackwardFromTo(int start, int end);
  void BackwardFrom(int start);
  void BackwardTo(int end);

  /**
   * @brief Reshape all layers from bottom to top.
   *
   * This is useful to propagate changes to layer sizes without running
   * a forward pass, e.g. to compute output feature size.
   */
  void Reshape();

  /// @brief Runs one forward pass followed by one backward pass; returns loss.
  Dtype ForwardBackward() {
    Dtype loss;
    Forward(&loss);
    Backward();
    return loss;
  }

  /// @brief Updates the network weights based on the diff values computed
  ///        (the diffs are prepared by the Solver).
  void Update();
  /**
   * @brief Shares weight data of owner blobs with shared blobs.
   *
   * Note: this is called by Net::Init, and thus should normally not be
   * called manually.
   */
  void ShareWeights();

  /**
   * @brief For an already initialized net, implicitly copies (i.e., using no
   *        additional memory) the pre-trained layers from another Net.
   */
  void ShareTrainedLayersWith(const Net* other);
  // For an already initialized net, CopyTrainedLayersFrom() copies the already
  // trained layers from another net parameter instance.
  /**
   * @brief For an already initialized net, copies the pre-trained layers from
   *        another Net.
   */
  void CopyTrainedLayersFrom(const NetParameter& param);
  void CopyTrainedLayersFrom(const string trained_filename);
  void CopyTrainedLayersFromBinaryProto(const string trained_filename);
  void CopyTrainedLayersFromHDF5(const string trained_filename);
  /// @brief Writes the net to a proto.
  void ToProto(NetParameter* param, bool write_diff = false) const;
  /// @brief Writes the net to an HDF5 file.
  void ToHDF5(const string& filename, bool write_diff = false) const;

  /// @brief returns the network name.
  inline const string& name() const { return name_; }
  /// @brief returns the layer names
  inline const vector<string>& layer_names() const { return layer_names_; }
  /// @brief returns the blob names
  inline const vector<string>& blob_names() const { return blob_names_; }
  /// @brief returns the blobs
  inline const vector<shared_ptr<Blob<Dtype> > >& blobs() const {
    return blobs_;
  }
  /// @brief returns the layers
  inline const vector<shared_ptr<Layer<Dtype> > >& layers() const {
    return layers_;
  }
  /// @brief returns the phase: TRAIN or TEST
  inline Phase phase() const { return phase_; }
  /**
   * @brief returns the bottom vecs for each layer -- usually you won't
   *        need this unless you do per-layer checks such as gradients.
   */
  inline const vector<vector<Blob<Dtype>*> >& bottom_vecs() const {
    return bottom_vecs_;
  }
  /**
   * @brief returns the top vecs for each layer -- usually you won't
   *        need this unless you do per-layer checks such as gradients.
   */
  inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
    return top_vecs_;
  }
  /// @brief returns the ids of the top blobs of layer i
  inline const vector<int> & top_ids(int i) const {
    CHECK_GE(i, 0) << "Invalid layer id";  // lower bound 0 restored
    CHECK_LT(i, top_id_vecs_.size()) << "Invalid layer id";
    return top_id_vecs_[i];
  }
  /// @brief returns the ids of the bottom blobs of layer i
  inline const vector<int> & bottom_ids(int i) const {
    CHECK_GE(i, 0) << "Invalid layer id";  // lower bound 0 restored
    CHECK_LT(i, bottom_id_vecs_.size()) << "Invalid layer id";
    return bottom_id_vecs_[i];
  }
  inline const vector<vector<bool> >& bottom_need_backward() const {
    return bottom_need_backward_;
  }
  inline const vector<Dtype>& blob_loss_weights() const {
    return blob_loss_weights_;
  }
  inline const vector<bool>& layer_need_backward() const {
    return layer_need_backward_;
  }
  /// @brief returns the parameters
  inline const vector<shared_ptr<Blob<Dtype> > >& params() const {
    return params_;
  }
  inline const vector<Blob<Dtype>*>& learnable_params() const {
    return learnable_params_;
  }
  /// @brief returns the learnable parameter learning rate multipliers
  inline const vector<float>& params_lr() const { return params_lr_; }
  inline const vector<bool>& has_params_lr() const { return has_params_lr_; }
  /// @brief returns the learnable parameter decay multipliers
  inline const vector<float>& params_weight_decay() const {
    return params_weight_decay_;
  }
  inline const vector<bool>& has_params_decay() const {
    return has_params_decay_;
  }
  const map<string, int>& param_names_index() const {
    return param_names_index_;
  }
  inline const vector<int>& param_owners() const { return param_owners_; }
  inline const vector<string>& param_display_names() const {
    return param_display_names_;
  }
  /// @brief Input and output blob numbers
  inline int num_inputs() const { return net_input_blobs_.size(); }
  inline int num_outputs() const { return net_output_blobs_.size(); }
  inline const vector<Blob<Dtype>*>& input_blobs() const {
    return net_input_blobs_;
  }
  inline const vector<Blob<Dtype>*>& output_blobs() const {
    return net_output_blobs_;
  }
  inline const vector<int>& input_blob_indices() const {
    return net_input_blob_indices_;
  }
  inline const vector<int>& output_blob_indices() const {
    return net_output_blob_indices_;
  }
  /// @brief returns whether the net contains a blob with the given name
  bool has_blob(const string& blob_name) const;
  /// @brief looks up a blob by name (if present)
  const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name) const;
  /// @brief returns whether the net contains a layer with the given name
  bool has_layer(const string& layer_name) const;
  /// @brief looks up a layer by name (if present)
  const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name) const;

  void set_debug_info(const bool value) { debug_info_ = value; }

  // Helpers for Init.
  /**
   * @brief Remove layers that the user specified should be excluded given
   *        the current phase, level, and stage.
   */
  static void FilterNet(const NetParameter& param,
      NetParameter* param_filtered);
  /// @brief return whether NetState state meets NetStateRule rule
  static bool StateMeetsRule(const NetState& state, const NetStateRule& rule,
      const string& layer_name);

  // Invoked at specific points during an iteration
  class Callback {
   protected:
    // Pure-virtual hook (the "= 0" literal was lost in the paste).
    virtual void run(int layer) = 0;

    template <typename T>
    friend class Net;
  };
  const vector<Callback*>& before_forward() const { return before_forward_; }
  void add_before_forward(Callback* value) {
    before_forward_.push_back(value);
  }
  const vector<Callback*>& after_forward() const { return after_forward_; }
  void add_after_forward(Callback* value) {
    after_forward_.push_back(value);
  }
  const vector<Callback*>& before_backward() const { return before_backward_; }
  void add_before_backward(Callback* value) {
    before_backward_.push_back(value);
  }
  const vector<Callback*>& after_backward() const { return after_backward_; }
  void add_after_backward(Callback* value) {
    after_backward_.push_back(value);
  }

 protected:
  // Helpers for Init.
  /// @brief Append a new top (output) blob to the net.
  void AppendTop(const NetParameter& param, const int layer_id,
                 const int top_id, set<string>* available_blobs,
                 map<string, int>* blob_name_to_idx);
  /// @brief Append a new bottom (input) blob to the net.
  int AppendBottom(const NetParameter& param, const int layer_id,
                   const int bottom_id, set<string>* available_blobs,
                   map<string, int>* blob_name_to_idx);
  /// @brief Append a new parameter (weight) blob to the net.
  void AppendParam(const NetParameter& param, const int layer_id,
                   const int param_id);

  /// @brief Helper for displaying debug info in Forward.
  void ForwardDebugInfo(const int layer_id);
  /// @brief Helper for displaying debug info in Backward.
  void BackwardDebugInfo(const int layer_id);
  /// @brief Helper for displaying debug info in Update.
  void UpdateDebugInfo(const int param_id);

  /// @brief The network name
  string name_;
  /// @brief The phase: TRAIN or TEST
  Phase phase_;
  /// @brief Individual layers in the net
  vector<shared_ptr<Layer<Dtype> > > layers_;
  vector<string> layer_names_;                // layer names
  map<string, int> layer_names_index_;        // layer name -> index
  vector<bool> layer_need_backward_;          // per-layer backward flag
  /// @brief the blobs storing intermediate results between the layers.
  vector<shared_ptr<Blob<Dtype> > > blobs_;
  vector<string> blob_names_;                 // blob names
  map<string, int> blob_names_index_;         // blob name -> index
  vector<bool> blob_need_backward_;           // per-blob backward flag
  /// bottom_vecs stores the vectors containing the input for each layer.
  /// They don't actually host the blobs (blobs_ does), so we simply store
  /// pointers.
  vector<vector<Blob<Dtype>*> > bottom_vecs_;
  vector<vector<int> > bottom_id_vecs_;
  vector<vector<bool> > bottom_need_backward_;
  /// top_vecs stores the vectors containing the output for each layer
  /// (again, the actual data is owned by blobs_).
  vector<vector<Blob<Dtype>*> > top_vecs_;
  vector<vector<int> > top_id_vecs_;
  /// Vector of weight in the loss (or objective) function of each net blob,
  /// indexed by blob_id.
  vector<Dtype> blob_loss_weights_;
  vector<vector<int> > param_id_vecs_;
  vector<int> param_owners_;
  vector<string> param_display_names_;
  vector<pair<int, int> > param_layer_indices_;
  map<string, int> param_names_index_;
  /// blob indices for the input and the output of the net
  vector<int> net_input_blob_indices_;
  vector<int> net_output_blob_indices_;
  vector<Blob<Dtype>*> net_input_blobs_;
  vector<Blob<Dtype>*> net_output_blobs_;
  /// The parameters in the network.
  vector<shared_ptr<Blob<Dtype> > > params_;
  vector<Blob<Dtype>*> learnable_params_;     // trainable weights
  /**
   * The mapping from params_ -> learnable_params_: we have
   * learnable_param_ids_.size() == params_.size(),
   * and learnable_params_[learnable_param_ids_[i]] == params_[i].get()
   * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer
   * and learnable_params_[learnable_param_ids_[i]] gives its owner.
   */
  vector<int> learnable_param_ids_;
  /// the learning rate multipliers for learnable_params_
  vector<float> params_lr_;
  vector<bool> has_params_lr_;
  /// the weight decay multipliers for learnable_params_
  vector<float> params_weight_decay_;
  vector<bool> has_params_decay_;
  /// The bytes of memory used by this net
  size_t memory_used_;
  /// Whether to compute and display debug info for the net.
  bool debug_info_;
  // Callbacks
  vector<Callback*> before_forward_;
  vector<Callback*> after_forward_;
  vector<Callback*> before_backward_;
  vector<Callback*> after_backward_;

  DISABLE_COPY_AND_ASSIGN(Net);  // forbid copy construction and assignment
};

}  // namespace caffe

#endif  // CAFFE_NET_HPP_

未完待续……

内容来自赵永科《深度学习 21天实战caffe》

【caffe Net】使用举例和代码中文注释的更多相关文章

  1. 去掉VS2010代码中文注释的红色下划线

    VS2010代码中文注释出现红色下划线,代码看上去很不美观,发现是由于安装Visual Assist X插件造成的. 解决办法:打开VAX的Options对话框,取消Advanced --> U ...

  2. 关闭shift中英文切换 英文代码/中文注释随意切换着写。

    x 背景 写代码的时候总是意外的就切成中文了,特别是代码中大小写切换的这种情况... 例如:"public static TimeZone CurrentTime..."publi ...

  3. 【caffe I/O】数据读取层 代码中文注释

    caffe.proto中DataParameter部分 message DataParameter { //输入数据使用的DB类型 enum DB { LEVELDB = ;//使用LEVELDB L ...

  4. 【caffe Layer】代码中文注释

    src/caffe/proto/caffe.proto 中LayerParameter部分 // NOTE // Update the next available ID when you add a ...

  5. WIdo联网代码中文注释

    代码如下 /*************************************************** 这是一个例子的dfrobot维多-无线集成物联网建兴传感器和控制节点 *产品页面及更 ...

  6. [转载]将别人的项目或JAVA文件导入到自己的Eclipse中时,常常会出现JAVA文件的中文注释变成乱码的情况,解决办法

    eclipse 代码中文注释乱码 求解决 将别人的项目或JAVA文件导入到自己的Eclipse中时,常常会出现JAVA文件的中文注释变成乱码的情况,主要原因就是别人的IDE编码格式和自己的Eclips ...

  7. mysql代码里面有中文注释导致语法错误

    一个简单的创建表的代码 DROP database IF exists reg_login; CREATE database reg_login; use reg_login --用户表 create ...

  8. Visual Studio vs2010 去掉中文注释红色下划线;去掉代码红色下划线;

    vs去掉下挂线也分两种: 1.去掉中文注释红色下划线,需要去掉VisualAssist下划线鸡肋功能: 1.选择Visual AssistX Options: 2.把如图所示的勾去掉,解决. 以后再次 ...

  9. C++格式化代码,去掉vs2010编辑器里中文注释的红色波浪线

    原文:http://sulianqi.cn/Article/ART2013053100001.html Vs2010中C++没有智能感应提示,不习惯,于是装了个番茄插件(Visual Assist x ...

随机推荐

  1. iOS自动签名网站

    node.js作为服务端,调用shell脚本进行iOS包重签名. 需要安装:nodejs ,forever 安装环境: 安装nodejs 安装forever: npm install forever ...

  2. Android著名开源库

    UI方面 1.绘制图表MPAndroidChart.hellocharts: https://github.com/PhilJay/MPAndroidChart https://github.com/ ...

  3. UCOSIII时间片轮转调度

    OS_RATE_HZ const OSCfg_TickRate_Hz = (OS_RATE_HZ )OS_CFG_TICK_RATE_HZ; #define OS_CFG_TICK_RATE_HZ 2 ...

  4. iPad所有平板型号屏幕尺寸

    1.iPad所有平板型号屏幕尺寸 尺寸 iPad型号 物理点 像素点 倍数 7.9 iPad Mini 768x1024 768x1024 1 7.9 iPad Mini 2 iPad Mini 3 ...

  5. iview表单数字验证

    sort: [ {required: true, message: '请填写栏目排序', trigger: 'blur'}, {type: 'number', message: '请输入数字', tr ...

  6. Mysql 存储过程 + python调用存储过程 (内置函数讲解及定义摘抄)

    定义 存储过程:就是为以后的使用而保存的一条或多条 MySQL语句的集合.可将其视为批文件,虽然它们的作用不仅限于批处理. 个人使用存储过程的原因就是因为 存储过程比使用单独的SQL语句要快 有如下表 ...

  7. python(time/random模块)

    一.Time模块 1.时间戳 时间戳是指格林威治时间1970年01月01日00时00分00秒(北京时间1970年01月01日08时00分00秒)起至现在的总秒数 最早出现的UNIX操作系统考虑到计算机 ...

  8. TableCache设置过小造成MyISAM频繁损坏 与 把table_cache适当调小mysql能更快地工作

    来源: 前些天说了一下如何修复损坏的MyISAM表,可惜只会修复并不能脱离被动的境地,只有查明了故障原因才会一劳永逸. 如果数据库服务非正常关闭(比如说进程被杀,服务器断电等等),并且此时恰好正在更新 ...

  9. Android笔记(五十) Android中的JSON数据

    JSON是什么: JSON是轻量级的文本数据交换格式 JSON独立于语言和平台 JSON具有自我描述性,更容易理解 JSON语法: 数据在名称/值对中 数据由逗号分割 大括号表示对象 中括号表示数组 ...

  10. Linux 进程IO杂项

    Linux 进程IO杂项 本文结合一个 pwn 例题,在分析例题的过程中穿插介绍相关知识. 例题来源:PWNABLE.KR 网站,Toddler's Bottle 小节,习题 input. 例题内容: ...