Using the Interp Layer in Caffe
In a recent experiment I borrowed the FPN architecture. Because FPN puts constraints on the input image shape, I used two approaches: the first pads the original image in data_layer.cpp; the second applies a crop-like resize to the feature maps so that their shapes match when they are combined in an Eltwise operation.
A quick note on the padding approach first: add the following to the DataSetup() and load_batch() functions in data_layer.cpp:
// cv_img is the original image read in; ext_img is the padded image
// extRows and extCols are the numbers of rows and columns to pad; see
// copyMakeBorder in the OpenCV documentation for details
copyMakeBorder(cv_img, ext_img, 0, extRows, 0, extCols, BORDER_CONSTANT);
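For concreteness, here is a minimal sketch of how extRows and extCols might be computed. The multiple-of-32 stride and the helper name PadToMultiple are my own illustration (FPN-style backbones typically need each side divisible by the total downsampling stride), not values from the original code:

#include <opencv2/opencv.hpp>

// Hypothetical helper: pad cv_img at the bottom/right so that both sides
// become multiples of `stride` (e.g. 32 for a 5-level FPN backbone).
cv::Mat PadToMultiple(const cv::Mat& cv_img, int stride) {
  const int extRows = (stride - cv_img.rows % stride) % stride;
  const int extCols = (stride - cv_img.cols % stride) % stride;
  cv::Mat ext_img;
  cv::copyMakeBorder(cv_img, ext_img, 0, extRows, 0, extCols,
                     cv::BORDER_CONSTANT, cv::Scalar(0));
  return ext_img;
}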
Next, the second approach: the Interp interpolation operation.
Step 1: add the necessary code. It comes mainly from https://github.com/hszhao/PSPNet
1. Add the code to PSPNet/include/caffe/layers/interp_layer.hpp:
#ifndef CAFFE_INTERP_LAYER_HPP_
#define CAFFE_INTERP_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Changes the spatial resolution by bi-linear interpolation.
 *        The target size is specified in terms of pixels.
 *        The start and end pixels of the input are mapped to the start
 *        and end pixels of the output.
 */
template <typename Dtype>
class InterpLayer : public Layer<Dtype> {
 public:
  explicit InterpLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Interp"; }
  // The number of bottom/top blobs can be changed here if your Interp layer
  // needs more than one input; the default is 1. The same applies below.
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int num_, channels_;
  int height_in_, width_in_;
  int height_out_, width_out_;
  int pad_beg_, pad_end_;
  int height_in_eff_, width_in_eff_;
};

}  // namespace caffe

#endif  // CAFFE_INTERP_LAYER_HPP_
2. Add the code to PSPNet/include/caffe/util/interp.hpp:
#ifndef CAFFE_UTIL_INTERP_H_
#define CAFFE_UTIL_INTERP_H_

#include <cublas_v2.h>

#include "caffe/proto/caffe.pb.h"

namespace caffe {

// Bi-linear interpolation
// IN : [channels height1 width1] cropped from a bigger [Height1 Width1] image
// OUT: [channels height2 width2] cropped from a bigger [Height2 Width2] image
template <typename Dtype, bool packed>
void caffe_cpu_interp2(const int channels,
    const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
    Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2);

template <typename Dtype, bool packed>
void caffe_gpu_interp2(const int channels,
    const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
    Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2);

// Backward (adjoint) operation
template <typename Dtype, bool packed>
void caffe_cpu_interp2_backward(const int channels,
    Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
    const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2);

template <typename Dtype, bool packed>
void caffe_gpu_interp2_backward(const int channels,
    Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
    const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2);

// Create Gaussian pyramid of an image. Assume output space is pre-allocated.
// IN : [channels height width]
template <typename Dtype, bool packed>
void caffe_cpu_pyramid2(const int channels,
    const Dtype *data, const int height, const int width,
    Dtype *data_pyr, const int levels);

template <typename Dtype, bool packed>
void caffe_gpu_pyramid2(const int channels,
    const Dtype *data, const int height, const int width,
    Dtype *data_pyr, const int levels);

/*
template <typename Dtype, bool packed>
void caffe_cpu_mosaic(const int channels,
    const Dtype *data1, const MosaicParameter mosaic_params1,
    const Dtype *data_pyr, const int levels,
    Dtype *data2, const MosaicParameter mosaic_params2);
template <typename Dtype, bool packed>
void caffe_gpu_mosaic(const int channels,
    const Dtype *data1, const MosaicParameter mosaic_params1,
    const Dtype *data_pyr, const int levels,
    Dtype *data2, const MosaicParameter mosaic_params2);
*/

}  // namespace caffe

#endif  // CAFFE_UTIL_INTERP_H_
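The window arguments are the easiest part to misread, so here is a minimal usage sketch (my own illustration, not part of the PSPNet code): each (x, y, height, width, Height, Width) sextuple selects a height-by-width window at offset (x, y) inside a larger Height-by-Width array, and the Interp layer simply passes the whole blob as the window.

// Minimal sketch: upsample a 1-channel 4x4 map to 8x8 with caffe_cpu_interp2.
// Assumes the files above are already in your Caffe tree and the program is
// linked against the Caffe library (which provides the <float,false> instance).
#include <vector>
#include "caffe/util/interp.hpp"

int main() {
  std::vector<float> src(4 * 4, 1.f);  // input: full 4x4 array, all ones
  std::vector<float> dst(8 * 8, 0.f);  // output: full 8x8 array
  caffe::caffe_cpu_interp2<float, false>(/*channels=*/1,
      src.data(), 0, 0, 4, 4, 4, 4,    // window = entire 4x4 input
      dst.data(), 0, 0, 8, 8, 8, 8);   // window = entire 8x8 output
  return 0;  // dst now holds the bilinearly interpolated map (all ones here)
}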
3. Add the code to PSPNet/include/caffe/common.cuh:
#ifndef CAFFE_COMMON_CUH_
#define CAFFE_COMMON_CUH_

#include <cuda.h>

// This guard is required: on GPUs with compute capability >= 6.0, CUDA already
// provides atomicAdd for doubles, and omitting the guard causes a redefinition
// error at build time. Adjust it to your server's CUDA configuration.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// CUDA: atomicAdd is not defined for doubles
static __inline__ __device__ double atomicAdd(double *address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  if (val == 0.0)
    return __longlong_as_double(old);
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif

#endif  // CAFFE_COMMON_CUH_
4. Add the code to PSPNet/src/caffe/layers/interp_layer.cpp:
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/interp.hpp"
#include "caffe/layers/interp_layer.hpp"

namespace caffe {

template <typename Dtype>
void InterpLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  InterpParameter interp_param = this->layer_param_.interp_param();
  pad_beg_ = interp_param.pad_beg();
  pad_end_ = interp_param.pad_end();
  CHECK_LE(pad_beg_, 0) << "Only supports non-pos padding (cropping) for now";
  CHECK_LE(pad_end_, 0) << "Only supports non-pos padding (cropping) for now";
}

template <typename Dtype>
void InterpLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_in_ = bottom[0]->height();
  width_in_ = bottom[0]->width();
  height_in_eff_ = height_in_ + pad_beg_ + pad_end_;
  width_in_eff_ = width_in_ + pad_beg_ + pad_end_;
  InterpParameter interp_param = this->layer_param_.interp_param();
  if (interp_param.has_shrink_factor() &&
      !interp_param.has_zoom_factor()) {
    const int shrink_factor = interp_param.shrink_factor();
    CHECK_GE(shrink_factor, 1) << "Shrink factor must be positive";
    height_out_ = (height_in_eff_ - 1) / shrink_factor + 1;
    width_out_ = (width_in_eff_ - 1) / shrink_factor + 1;
  } else if (interp_param.has_zoom_factor() &&
             !interp_param.has_shrink_factor()) {
    const int zoom_factor = interp_param.zoom_factor();
    CHECK_GE(zoom_factor, 1) << "Zoom factor must be positive";
    height_out_ = height_in_eff_ + (height_in_eff_ - 1) * (zoom_factor - 1);
    width_out_ = width_in_eff_ + (width_in_eff_ - 1) * (zoom_factor - 1);
  } else if (interp_param.has_height() && interp_param.has_width()) {
    height_out_ = interp_param.height();
    width_out_ = interp_param.width();
  } else if (interp_param.has_shrink_factor() &&
             interp_param.has_zoom_factor()) {
    const int shrink_factor = interp_param.shrink_factor();
    const int zoom_factor = interp_param.zoom_factor();
    CHECK_GE(shrink_factor, 1) << "Shrink factor must be positive";
    CHECK_GE(zoom_factor, 1) << "Zoom factor must be positive";
    height_out_ = (height_in_eff_ - 1) / shrink_factor + 1;
    width_out_ = (width_in_eff_ - 1) / shrink_factor + 1;
    height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1);
    width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1);
  } else {
    LOG(FATAL);
  }
  CHECK_GT(height_in_eff_, 0) << "height should be positive";
  CHECK_GT(width_in_eff_, 0) << "width should be positive";
  CHECK_GT(height_out_, 0) << "height should be positive";
  CHECK_GT(width_out_, 0) << "width should be positive";
  top[0]->Reshape(num_, channels_, height_out_, width_out_);
}

template <typename Dtype>
void InterpLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  caffe_cpu_interp2<Dtype,false>(num_ * channels_,
      bottom[0]->cpu_data(), -pad_beg_, -pad_beg_, height_in_eff_, width_in_eff_, height_in_, width_in_,
      top[0]->mutable_cpu_data(), 0, 0, height_out_, width_out_, height_out_, width_out_);
}

template <typename Dtype>
void InterpLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  caffe_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_cpu_diff());
  caffe_cpu_interp2_backward<Dtype,false>(num_ * channels_,
      bottom[0]->mutable_cpu_diff(), -pad_beg_, -pad_beg_, height_in_eff_, width_in_eff_, height_in_, width_in_,
      top[0]->cpu_diff(), 0, 0, height_out_, width_out_, height_out_, width_out_);
}

#ifndef CPU_ONLY
template <typename Dtype>
void InterpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  caffe_gpu_interp2<Dtype,false>(num_ * channels_,
      bottom[0]->gpu_data(), -pad_beg_, -pad_beg_, height_in_eff_, width_in_eff_, height_in_, width_in_,
      top[0]->mutable_gpu_data(), 0, 0, height_out_, width_out_, height_out_, width_out_);
}

template <typename Dtype>
void InterpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
  caffe_gpu_interp2_backward<Dtype,false>(num_ * channels_,
      bottom[0]->mutable_gpu_diff(), -pad_beg_, -pad_beg_, height_in_eff_, width_in_eff_, height_in_, width_in_,
      top[0]->gpu_diff(), 0, 0, height_out_, width_out_, height_out_, width_out_);
}
#endif

#ifdef CPU_ONLY
STUB_GPU(InterpLayer);
#endif

INSTANTIATE_CLASS(InterpLayer);
REGISTER_LAYER_CLASS(Interp);

}  // namespace caffe
5. Add the code to PSPNet/src/caffe/util/interp.cpp:
#include "caffe/common.hpp"
#include "caffe/util/interp.hpp"
#include <algorithm>
#include <cmath>

namespace caffe {

// Bi-linear interpolation
// IN : [channels height1 width1] cropped from a bigger [Height1 Width1] image
// OUT: [channels height2 width2] cropped from a bigger [Height2 Width2] image
template <typename Dtype, bool packed>
void caffe_cpu_interp2(const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
// special case: just copy
if (height1 == height2 && width1 == width2) {
for (int h2 = 0; h2 < height2; ++h2) {
const int h1 = h2;
for (int w2 = 0; w2 < width2; ++w2) {
const int w1 = w2;
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
return;
}
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
for (int h2 = 0; h2 < height2; ++h2) {
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
for (int w2 = 0; w2 < width2; ++w2) {
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[channels * w1p]) +
h1lambda * (w0lambda * pos1[channels * h1p * Width1] + w1lambda * pos1[channels * (h1p * Width1 + w1p)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
h1lambda * (w0lambda * pos1[h1p * Width1] + w1lambda * pos1[h1p * Width1 + w1p]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
}

// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, bool packed>
void caffe_cpu_interp2_backward(const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
// special case: same-size matching grids
if (height1 == height2 && width1 == width2) {
for (int h2 = 0; h2 < height2; ++h2) {
const int h1 = h2;
for (int w2 = 0; w2 < width2; ++w2) {
const int w1 = w2;
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
return;
}
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
for (int h2 = 0; h2 < height2; ++h2) {
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
for (int w2 = 0; w2 < width2; ++w2) {
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos1[0] += h0lambda * w0lambda * pos2[0];
pos1[channels * w1p] += h0lambda * w1lambda * pos2[0];
pos1[channels * h1p * Width1] += h1lambda * w0lambda * pos2[0];
pos1[channels * (h1p * Width1 + w1p)] += h1lambda * w1lambda * pos2[0];
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos1[0] += h0lambda * w0lambda * pos2[0];
pos1[w1p] += h0lambda * w1lambda * pos2[0];
pos1[h1p * Width1] += h1lambda * w0lambda * pos2[0];
pos1[h1p * Width1 + w1p] += h1lambda * w1lambda * pos2[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}
}

// Create Gaussian pyramid of an image. Assume output space is pre-allocated.
// IN : [channels height width]
template <typename Dtype, bool packed>
void caffe_cpu_pyramid2(const int channels,
const Dtype *data, const int height, const int width,
Dtype *data_pyr, const int levels) {
CHECK(height > 0 && width > 0 && levels >= 0);
int height1 = height, width1 = width;
int height2 = height, width2 = width;
const Dtype *data1 = data;
Dtype *data2 = data_pyr;
for (int l = 0; l < levels; ++l) {
height2 /= 2;
width2 /= 2;
if (height2 == 0 || width2 == 0) {
break;
}
for (int h2 = 0; h2 < height2; ++h2) {
const int h1 = 2 * h2;
for (int w2 = 0; w2 < width2; ++w2) {
const int w1 = 2 * w2;
if (packed) {
const Dtype* pos1 = &data1[channels * (h1 * width1 + w1)];
Dtype* pos2 = &data2[channels * (h2 * width2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[channels] +
pos1[channels * width1] + pos1[channels * (width1 + 1)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[h1 * width1 + w1];
Dtype* pos2 = &data2[h2 * width2 + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[1] +
pos1[width1] + pos1[width1 + 1]);
pos1 += width1 * height1;
pos2 += width2 * height2;
}
}
}
}
data1 = data2;
height1 = height2;
width1 = width2;
data2 += channels * height2 * width2;
}
}

/*
template <typename Dtype, bool packed>
void caffe_cpu_mosaic(const int channels,
const Dtype *data1, const MosaicParameter mosaic_params1,
const Dtype *data_pyr, const int levels,
Dtype *data2, const MosaicParameter mosaic_params2) {
const int num1 = mosaic_params1.rects_size();
const int num2 = mosaic_params2.rects_size();
CHECK(num1 == num2 || (num1 == 1 && num2 > 1) || (num2 == 1 && num1 > 1));
const int num = std::max(num1, num2);
for (int i = 0; i < num; ++i) {
const Rect rect1 = mosaic_params1.rects((i < num1) ? i : 0);
const Rect rect2 = mosaic_params2.rects((i < num2) ? i : 0);
int level = log2(sqrt((float)rect1.height() * rect1.width() / rect2.height() / rect2.width()));
level = std::max(0, std::min(levels, level));
if (data_pyr == 0 || level == 0) {
caffe_cpu_interp2<Dtype,packed>(channels,
data1, rect1.x(), rect1.y(), rect1.height(), rect1.width(), mosaic_params1.height(), mosaic_params1.width(),
data2, rect2.x(), rect2.y(), rect2.height(), rect2.width(), mosaic_params2.height(), mosaic_params2.width());
}
else {
const Dtype *data_pyr_l = data_pyr;
int factor = 2;
for (int l = 1; l < level; ++l) {
data_pyr_l += channels * (mosaic_params1.height() / factor) * (mosaic_params1.width() / factor);
factor *= 2;
}
caffe_cpu_interp2<Dtype,packed>(channels,
data_pyr_l, rect1.x() / factor, rect1.y() / factor, rect1.height() / factor, rect1.width() / factor, mosaic_params1.height() / factor, mosaic_params1.width() / factor,
data2, rect2.x(), rect2.y(), rect2.height(), rect2.width(), mosaic_params2.height(), mosaic_params2.width());
}
}
}
template <typename Dtype, bool packed>
void caffe_gpu_mosaic(const int channels,
const Dtype *data1, const MosaicParameter mosaic_params1,
const Dtype *data_pyr, const int levels,
Dtype *data2, const MosaicParameter mosaic_params2) {
const int num1 = mosaic_params1.rects_size();
const int num2 = mosaic_params2.rects_size();
CHECK(num1 == num2 || (num1 == 1 && num2 > 1) || (num2 == 1 && num1 > 1));
const int num = std::max(num1, num2);
for (int i = 0; i < num; ++i) {
const Rect rect1 = mosaic_params1.rects((i < num1) ? i : 0);
const Rect rect2 = mosaic_params2.rects((i < num2) ? i : 0);
int level = log2(sqrt((float)rect1.height() * rect1.width() / rect2.height() / rect2.width()));
level = std::max(0, std::min(levels, level));
if (data_pyr == 0 || level == 0) {
caffe_gpu_interp2<Dtype,packed>(channels,
data1, rect1.x(), rect1.y(), rect1.height(), rect1.width(), mosaic_params1.height(), mosaic_params1.width(),
data2, rect2.x(), rect2.y(), rect2.height(), rect2.width(), mosaic_params2.height(), mosaic_params2.width());
}
else {
const Dtype *data_pyr_l = data_pyr;
int factor = 2;
for (int l = 1; l < level; ++l) {
data_pyr_l += channels * (mosaic_params1.height() / factor) * (mosaic_params1.width() / factor);
factor *= 2;
}
caffe_gpu_interp2<Dtype,packed>(channels,
data_pyr_l, rect1.x() / factor, rect1.y() / factor, rect1.height() / factor, rect1.width() / factor, mosaic_params1.height() / factor, mosaic_params1.width() / factor,
data2, rect2.x(), rect2.y(), rect2.height(), rect2.width(), mosaic_params2.height(), mosaic_params2.width());
}
}
}
*/

// Explicit instances
template void caffe_cpu_interp2<float,false>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_cpu_interp2<float,true>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_cpu_interp2<double,false>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);
template void caffe_cpu_interp2<double,true>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);

template void caffe_cpu_interp2_backward<float,false>(const int, float *, const int, const int, const int, const int, const int, const int, const float *, const int, const int, const int, const int, const int, const int);
template void caffe_cpu_interp2_backward<double,false>(const int, double *, const int, const int, const int, const int, const int, const int, const double *, const int, const int, const int, const int, const int, const int);

template void caffe_cpu_pyramid2<float,false>(const int, const float *, const int, const int, float *, const int);
template void caffe_cpu_pyramid2<float,true>(const int, const float *, const int, const int, float *, const int);
template void caffe_cpu_pyramid2<double,false>(const int, const double *, const int, const int, double *, const int);
template void caffe_cpu_pyramid2<double,true>(const int, const double *, const int, const int, double *, const int);

/*
template void caffe_cpu_mosaic<float,false>(const int, const float *, const MosaicParameter, const float *, const int, float *, const MosaicParameter);
template void caffe_cpu_mosaic<float,true>(const int, const float *, const MosaicParameter, const float *, const int, float *, const MosaicParameter);
template void caffe_cpu_mosaic<double,false>(const int, const double *, const MosaicParameter, const double *, const int, double *, const MosaicParameter);
template void caffe_cpu_mosaic<double,true>(const int, const double *, const MosaicParameter, const double *, const int, double *, const MosaicParameter);
template void caffe_gpu_mosaic<float,false>(const int, const float *, const MosaicParameter, const float *, const int, float *, const MosaicParameter);
template void caffe_gpu_mosaic<float,true>(const int, const float *, const MosaicParameter, const float *, const int, float *, const MosaicParameter);
template void caffe_gpu_mosaic<double,false>(const int, const double *, const MosaicParameter, const double *, const int, double *, const MosaicParameter);
template void caffe_gpu_mosaic<double,true>(const int, const double *, const MosaicParameter, const double *, const int, double *, const MosaicParameter);
*/

}  // namespace caffe
6. Add the code to PSPNet/src/caffe/util/interp.cu:
#include "caffe/common.hpp"
#include "caffe/common.cuh"
#include "caffe/util/interp.hpp" namespace caffe { // Bi-linear interpolation
// IN : [channels height1 width1] cropped from a bigger [Height1 Width1] image
// OUT: [channels height2 width2] cropped from a bigger [Height2 Width2] image
template <typename Dtype, bool packed>
__global__ void caffe_gpu_interp2_kernel(const int n, const float rheight, const float rwidth,
const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
return;
}
//
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
//
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
//
if (packed) {
const Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[channels * w1p]) +
h1lambda * (w0lambda * pos1[channels * h1p * Width1] + w1lambda * pos1[channels * (h1p * Width1 + w1p)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] =
h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) +
h1lambda * (w0lambda * pos1[h1p * Width1] + w1lambda * pos1[h1p * Width1 + w1p]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}

template <typename Dtype, bool packed>
void caffe_gpu_interp2(const int channels,
const Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
const int num_kernels = height2 * width2;
caffe_gpu_interp2_kernel<Dtype,packed><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, rheight, rwidth, channels,
data1, x1, y1, height1, width1, Height1, Width1,
data2, x2, y2, height2, width2, Height2, Width2);
CUDA_POST_KERNEL_CHECK;
}

// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, bool packed>
__global__ void caffe_gpu_interp2_kernel_backward(const int n, const float rheight, const float rwidth,
const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
pos1[0] += pos2[0];
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
return;
}
//
const float h1r = rheight * h2;
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Dtype h1lambda = h1r - h1;
const Dtype h0lambda = Dtype(1.) - h1lambda;
//
const float w1r = rwidth * w2;
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Dtype w1lambda = w1r - w1;
const Dtype w0lambda = Dtype(1.) - w1lambda;
//
if (packed) {
Dtype* pos1 = &data1[channels * ((y1 + h1) * Width1 + (x1 + w1))];
const Dtype* pos2 = &data2[channels * ((y2 + h2) * Width2 + (x2 + w2))];
for (int c = 0; c < channels; ++c) {
atomicAdd(&pos1[0], h0lambda * w0lambda * pos2[0]);
atomicAdd(&pos1[channels * w1p], h0lambda * w1lambda * pos2[0]);
atomicAdd(&pos1[channels * h1p * Width1], h1lambda * w0lambda * pos2[0]);
atomicAdd(&pos1[channels * (h1p * Width1 + w1p)], h1lambda * w1lambda * pos2[0]);
pos1++;
pos2++;
}
}
else {
Dtype* pos1 = &data1[(y1 + h1) * Width1 + (x1 + w1)];
const Dtype* pos2 = &data2[(y2 + h2) * Width2 + (x2 + w2)];
for (int c = 0; c < channels; ++c) {
atomicAdd(&pos1[0], h0lambda * w0lambda * pos2[0]);
atomicAdd(&pos1[w1p], h0lambda * w1lambda * pos2[0]);
atomicAdd(&pos1[h1p * Width1], h1lambda * w0lambda * pos2[0]);
atomicAdd(&pos1[h1p * Width1 + w1p], h1lambda * w1lambda * pos2[0]);
pos1 += Width1 * Height1;
pos2 += Width2 * Height2;
}
}
}
}

template <typename Dtype, bool packed>
void caffe_gpu_interp2_backward(const int channels,
Dtype *data1, const int x1, const int y1, const int height1, const int width1, const int Height1, const int Width1,
const Dtype *data2, const int x2, const int y2, const int height2, const int width2, const int Height2, const int Width2) {
CHECK(x1 >= 0 && y1 >= 0 && height1 > 0 && width1 > 0 && x2 >= 0 && y2 >= 0 && height2 > 0 && width2 > 0);
CHECK(Width1 >= width1 + x1 && Height1 >= height1 + y1 && Width2 >= width2 + x2 && Height2 >= height2 + y2);
const float rheight = (height2 > 1) ? static_cast<float>(height1 - 1) / (height2 - 1) : 0.f;
const float rwidth = (width2 > 1) ? static_cast<float>(width1 - 1) / (width2 - 1) : 0.f;
const int num_kernels = height2 * width2;
caffe_gpu_interp2_kernel_backward<Dtype,packed><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, rheight, rwidth, channels,
data1, x1, y1, height1, width1, Height1, Width1,
data2, x2, y2, height2, width2, Height2, Width2);
CUDA_POST_KERNEL_CHECK;
}

// Create Gaussian pyramid of an image. Assume output space is pre-allocated.
// IN : [channels height width]
template <typename Dtype, bool packed>
__global__ void caffe_gpu_pyramid2_kernel(const int n, const int channels,
const Dtype *data1, const int height1, const int width1,
Dtype *data2, const int height2, const int width2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
const int w1 = 2 * w2;
const int h1 = 2 * h2;
if (packed) {
const Dtype* pos1 = &data1[channels * (h1 * width1 + w1)];
Dtype* pos2 = &data2[channels * (h2 * width2 + w2)];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[channels] +
pos1[channels * width1] + pos1[channels * (width1 + 1)]);
pos1++;
pos2++;
}
}
else {
const Dtype* pos1 = &data1[h1 * width1 + w1];
Dtype* pos2 = &data2[h2 * width2 + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = static_cast<Dtype>(.25) *
(pos1[0] + pos1[1] +
pos1[width1] + pos1[width1 + 1]);
pos1 += width1 * height1;
pos2 += width2 * height2;
}
}
}
}

template <typename Dtype, bool packed>
void caffe_gpu_pyramid2(const int channels,
const Dtype *data, const int height, const int width,
Dtype *data_pyr, const int levels) {
CHECK(height > 0 && width > 0 && levels >= 0);
int height1 = height, width1 = width;
int height2 = height, width2 = width;
const Dtype *data1 = data;
Dtype *data2 = data_pyr;
for (int l = 0; l < levels; ++l) {
height2 /= 2;
width2 /= 2;
if (height2 == 0 || width2 == 0) {
break;
}
const int num_kernels = height2 * width2;
caffe_gpu_pyramid2_kernel<Dtype,packed><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, channels, data1, height1, width1, data2, height2, width2);
CUDA_POST_KERNEL_CHECK;
data1 = data2;
height1 = height2;
width1 = width2;
data2 += channels * height2 * width2;
}
}

// Explicit instances
template void caffe_gpu_interp2<float,false>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<float,true>(const int, const float *, const int, const int, const int, const int, const int, const int, float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<double,false>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2<double,true>(const int, const double *, const int, const int, const int, const int, const int, const int, double *, const int, const int, const int, const int, const int, const int);

template void caffe_gpu_interp2_backward<float,false>(const int, float *, const int, const int, const int, const int, const int, const int, const float *, const int, const int, const int, const int, const int, const int);
template void caffe_gpu_interp2_backward<double,false>(const int, double *, const int, const int, const int, const int, const int, const int, const double *, const int, const int, const int, const int, const int, const int);

template void caffe_gpu_pyramid2<float,false>(const int, const float *, const int, const int, float *, const int);
template void caffe_gpu_pyramid2<float,true>(const int, const float *, const int, const int, float *, const int);
template void caffe_gpu_pyramid2<double,false>(const int, const double *, const int, const int, double *, const int);
template void caffe_gpu_pyramid2<double,true>(const int, const double *, const int, const int, double *, const int);

}  // namespace caffe
7. Update PSPNet/src/caffe/proto/caffe.proto: extend LayerParameter with interp_param and define the InterpParameter message, as follows:
message LayerParameter {
optional string name = 1; // the layer name
optional string type = 2; // the layer type
repeated string bottom = 3; // the name of each bottom blob
  repeated string top = 4;  // the name of each top blob

  // The train / test phase for computation.
  optional Phase phase = 10;

  // The amount of weight to assign each top blob in the objective.
// Each layer assigns a default value, usually of either 0 or 1,
// to each top blob.
  repeated float loss_weight = 5;

  // Specifies training parameters (multipliers on global learning constants,
// and the name and other settings used for weight sharing).
  repeated ParamSpec param = 6;

  // The blobs containing the numeric parameters of the layer.
  repeated BlobProto blobs = 7;

  // Specifies on which bottoms the backpropagation should be skipped.
// The size must be either 0 or equal to the number of bottoms.
  repeated bool propagate_down = 11;

  // Rules controlling whether and when a layer is included in the network,
// based on the current NetState. You may specify a non-zero number of rules
// to include OR exclude, but not both. If no include or exclude rules are
// specified, the layer is always included. If the current NetState meets
// ANY (i.e., one or more) of the specified rules, the layer is
// included/excluded.
repeated NetStateRule include = 8;
  repeated NetStateRule exclude = 9;

  // Parameters for data pre-processing.
  optional TransformationParameter transform_param = 100;

  // Parameters shared by loss layers.
  optional LossParameter loss_param = 101;

  // Layer type-specific parameters.
//
// Note: certain layers may have more than one computational engine
// for their implementation. These layers include an Engine type and
// engine parameter for selecting the implementation.
// The default for the engine is set by the ENGINE switch at compile-time.
optional AccuracyParameter accuracy_param = 102;
optional AdaptiveBiasChannelParameter adaptive_bias_channel_param = 148;
optional ArgMaxParameter argmax_param = 103;
optional BatchNormParameter batch_norm_param = 139;
optional BNParameter bn_param = 152;
optional BiasParameter bias_param = 141;
optional BiasChannelParameter bias_channel_param = 149;
optional ConcatParameter concat_param = 104;
optional ContrastiveLossParameter contrastive_loss_param = 105;
optional ConvolutionParameter convolution_param = 106;
optional DataParameter data_param = 107;
optional DenseCRFParameter dense_crf_param = 146;
optional DomainTransformParameter domain_transform_param = 147;
optional DropoutParameter dropout_param = 108;
optional DummyDataParameter dummy_data_param = 109;
optional EltwiseParameter eltwise_param = 110;
optional ELUParameter elu_param = 140;
optional EmbedParameter embed_param = 137;
optional ExpParameter exp_param = 111;
optional FlattenParameter flatten_param = 135;
optional HDF5DataParameter hdf5_data_param = 112;
optional HDF5OutputParameter hdf5_output_param = 113;
optional HingeLossParameter hinge_loss_param = 114;
optional ImageDataParameter image_data_param = 115;
optional InfogainLossParameter infogain_loss_param = 116;
optional InnerProductParameter inner_product_param = 117;
  optional InterpParameter interp_param = 143;  // NOTE: 143 must not collide with any other field number; adjust to your own proto
optional LogParameter log_param = 134;
optional LRNParameter lrn_param = 118;
optional MatReadParameter mat_read_param = 151;
optional MatWriteParameter mat_write_param = 145;
optional MemoryDataParameter memory_data_param = 119;
optional MVNParameter mvn_param = 120;
optional PoolingParameter pooling_param = 121;
optional PowerParameter power_param = 122;
optional PReLUParameter prelu_param = 131;
optional PythonParameter python_param = 130;
optional ReductionParameter reduction_param = 136;
optional ReLUParameter relu_param = 123;
optional ReshapeParameter reshape_param = 133;
optional ScaleParameter scale_param = 142;
optional SegAccuracyParameter seg_accuracy_param = 144;
optional SigmoidParameter sigmoid_param = 124;
optional SoftmaxParameter softmax_param = 125;
optional SPPParameter spp_param = 132;
optional SliceParameter slice_param = 126;
optional TanHParameter tanh_param = 127;
optional ThresholdParameter threshold_param = 128;
optional TileParameter tile_param = 138;
optional UniqueLabelParameter unique_label_param = 150;
optional WindowDataParameter window_data_param = 129;
}
message InterpParameter {
optional int32 height = 1 [default = 0]; // Height of output
optional int32 width = 2 [default = 0]; // Width of output
optional int32 zoom_factor = 3 [default = 1]; // zoom factor
optional int32 shrink_factor = 4 [default = 1]; // shrink factor
optional int32 pad_beg = 5 [default = 0]; // padding at begin of input
optional int32 pad_end = 6 [default = 0]; // padding at end of input
}
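One detail worth noting: these fields are proto2 optionals with defaults, and the Reshape() branches above test has_shrink_factor(), has_zoom_factor(), has_height(), and has_width(). In proto2, those has_ accessors return true only when a field is explicitly written in the prototxt, so the defaults never select a branch on their own; you choose a sizing mode purely by which parameters you write.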
8. Add an Interp layer to your network definition, for example:
layer {
  bottom: "input"
  top: "output"
  name: "interp_layer"
  type: "Interp"
  # To fix the feature-map size instead, replace the block below with
  # interp_param { height: 60 width: 60 }; interp_param may also be omitted entirely.
  interp_param {
    shrink_factor: 4
    zoom_factor: 3
    pad_beg: 0
    pad_end: 0
  }
}
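As a quick check of the Reshape() arithmetic above: with shrink_factor: 4, zoom_factor: 3, and no padding, a 65x65 bottom blob first shrinks to (65 - 1) / 4 + 1 = 17 and then zooms to 17 + (17 - 1) * (3 - 1) = 49, giving a 49x49 top blob. The fixed-size form interp_param { height: 60 width: 60 } is usually the convenient one when two feature maps must end up the same shape for an Eltwise layer.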
Step 2: with the code in place, build everything.
1. Because caffe.proto has been modified, the protobuf files must be regenerated. Install protobuf first, then compile caffe.proto to produce caffe.pb.h and caffe.pb.cc:
# Check the protobuf version
$ protoc --version
libprotoc 2.5.0

# Compile caffe.proto: either cd into src/caffe/proto first, or pass the paths explicitly
$ protoc -I=./ --cpp_out=./ ./caffe.proto

# Inspect the generated files
$ ls
caffe.pb.cc  caffe.pb.h  caffe.proto
2. Rebuild Caffe: return to the Caffe root directory and run:
# make sure each step completes successfully
make clean
make -j8
make pycaffe
...That is roughly the whole workflow. If you run into problems during the build, search for the specific error message; corrections to anything above are welcome.
Copyright notice:
Author: 王老头
Source: http://www.cnblogs.com/wmr95/p/8715607.html
The copyright of this article belongs to the author and 博客园 (cnblogs). Reposting is welcome, but unless the author consents otherwise, this notice must be retained and a clearly visible link to the original must appear on the article page; otherwise the author reserves the right to pursue legal liability.