MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
ReLULayer.cs
1using System;
2using System.Collections.Generic;
3using System.Linq;
4using System.Text;
5using MyCaffe.common;
6using MyCaffe.basecode;
7using MyCaffe.param;
8
9namespace MyCaffe.layers
10{
/// <summary>
/// The ReLULayer computes the "Rectifier Linear Unit" non-linearity, a classic
/// activation for neural networks: y = max(0, x), optionally with a non-zero
/// slope for negative inputs (see ReLUParameter.negative_slope).
/// Runs in either Engine.CAFFE (native CUDA kernels) or Engine.CUDNN mode.
/// </summary>
/// <typeparam name="T">Specifies the base type (float or double).</typeparam>
public class ReLULayer<T> : NeuronLayer<T>
{
    // cuDNN handles; these remain 0 unless LayerSetUp runs in Engine.CUDNN mode.
    long m_hCudnn = 0;
    long m_hBottomDesc = 0;
    long m_hTopDesc = 0;

    /// <summary>
    /// The ReLULayer constructor.
    /// </summary>
    /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
    /// <param name="log">Specifies the Log for output.</param>
    /// <param name="p">Specifies the LayerParameter, initialized with LayerType.RELU.</param>
    public ReLULayer(CudaDnn<T> cuda, Log log, LayerParameter p)
        : base(cuda, log, p)
    {
        // NOTE(review): this assignment was lost in extraction; reconstructed from
        // the cross-referenced use of m_type - confirm against the repository.
        m_type = LayerParameter.LayerType.RELU;
    }

    /// <summary>
    /// Releases all GPU and host resources used by the Layer.
    /// </summary>
    protected override void dispose()
    {
        // Free the tensor descriptors before the cuDNN instance that owns them.
        if (m_hBottomDesc != 0)
        {
            m_cuda.FreeTensorDesc(m_hBottomDesc);
            m_hBottomDesc = 0;
        }

        if (m_hTopDesc != 0)
        {
            m_cuda.FreeTensorDesc(m_hTopDesc);
            m_hTopDesc = 0;
        }

        if (m_hCudnn != 0)
        {
            m_cuda.FreeCuDNN(m_hCudnn);
            m_hCudnn = 0;
        }

        base.dispose();
    }

    /// <summary>
    /// Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
    /// </summary>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
    {
        // NOTE(review): this guard was lost in extraction (a bare 'return;' remained);
        // reconstructed so cuDNN resources are only created when cuDNN mode is
        // selected - confirm against the repository.
        if (!m_param.relu_param.useCudnn())
            return;

        // Setup the convert to half flag used by the Layer just before calling
        // forward and backward.
        m_bUseHalfSize = m_param.relu_param.use_halfsize;

        // Initialize cuDNN.
        m_hCudnn = m_cuda.CreateCuDNN();
        m_hBottomDesc = m_cuda.CreateTensorDesc();
        m_hTopDesc = m_cuda.CreateTensorDesc();
    }

    /// <summary>
    /// Reshape the bottom (input) and top (output) blobs.
    /// </summary>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
    {
        base.Reshape(colBottom, colTop);
        if (!reshapeNeeded(colBottom, colTop, false))
            return;

        // NOTE(review): this guard was lost in extraction (a bare 'return;' remained);
        // tensor descriptors only exist in cuDNN mode - confirm against the repository.
        if (!m_param.relu_param.useCudnn())
            return;

        int nN = colBottom[0].num;
        int nK = colBottom[0].channels;
        int nH = colBottom[0].height;
        int nW = colBottom[0].width;

        // Both descriptors share the bottom's N x K x H x W sizing (in-place friendly).
        m_cuda.SetTensorDesc(m_hBottomDesc, nN, nK, nH, nW, m_bUseHalfSize);
        m_cuda.SetTensorDesc(m_hTopDesc, nN, nK, nH, nW, m_bUseHalfSize);
    }

    /// <summary>
    /// Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
    /// </summary>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
    {
        // NOTE(review): the condition was lost in extraction (a dangling 'else'
        // remained); reconstructed from the matching dispatch in backward().
        if (!m_param.relu_param.useCudnn())
            forward_cuda(colBottom, colTop);
        else
            forward_cudnn(colBottom, colTop);
    }

    /// <summary>
    /// Computes the error gradient w.r.t the inputs using either the Engine.CAFFE
    /// or Engine.CUDNN mode.
    /// </summary>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    /// <param name="rgbPropagateDown">Specifies per-bottom whether to propagate the gradient down.</param>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
    {
        // NOTE(review): the condition was lost in extraction (a dangling 'else'
        // remained); reconstructed from the matching dispatch in forward().
        if (!m_param.relu_param.useCudnn())
            backward_cuda(colTop, rgbPropagateDown, colBottom);
        else
            backward_cudnn(colTop, rgbPropagateDown, colBottom);
    }

    /// <summary>
    /// Computes the forward calculation using the Engine.CAFFE mode.
    /// </summary>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    protected void forward_cuda(BlobCollection<T> colBottom, BlobCollection<T> colTop)
    {
        long hBottomData = colBottom[0].gpu_data;
        long hTopData = colTop[0].mutable_gpu_data;
        int nCount = colBottom[0].count();
        T fNegativeSlope = (T)Convert.ChangeType(m_param.relu_param.negative_slope, typeof(T));

        m_cuda.relu_fwd(nCount, hBottomData, hTopData, fNegativeSlope);
    }

    /// <summary>
    /// Computes the error gradient w.r.t the inputs using the Engine.CAFFE mode.
    /// </summary>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    /// <param name="rgbPropagateDown">Specifies per-bottom whether to propagate the gradient down.</param>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    protected void backward_cuda(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
    {
        long hTopData = colTop[0].gpu_data;
        long hTopDiff = colTop[0].gpu_diff;
        long hBottomDiff = colBottom[0].mutable_gpu_diff;
        int nCount = colBottom[0].count();
        T fNegativeSlope = (T)Convert.ChangeType(m_param.relu_param.negative_slope, typeof(T));

        m_cuda.relu_bwd(nCount, hTopDiff, hTopData, hBottomDiff, fNegativeSlope);
    }

    /// <summary>
    /// Computes the forward calculation using the Engine.CUDNN mode.
    /// </summary>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    protected void forward_cudnn(BlobCollection<T> colBottom, BlobCollection<T> colTop)
    {
        long hBottomData = colBottom[0].gpu_data;
        long hTopData = colTop[0].mutable_gpu_data;

        // y = 1 * relu(x) + 0 * y  (alpha = m_tOne, beta = m_tZero).
        m_cuda.ReLUForward(m_hCudnn, m_tOne, m_hBottomDesc, hBottomData, m_tZero, m_hTopDesc, hTopData);
    }

    /// <summary>
    /// Computes the error gradient w.r.t the inputs using the Engine.CUDNN mode.
    /// </summary>
    /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
    /// <param name="rgbPropagateDown">Specifies per-bottom whether to propagate the gradient down.</param>
    /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
    protected void backward_cudnn(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
    {
        // Nothing to do when the gradient is not propagated to this bottom.
        if (!rgbPropagateDown[0])
            return;

        long hTopData = colTop[0].gpu_data;
        long hTopDiff = colTop[0].gpu_diff;
        long hBottomData = colBottom[0].gpu_data;
        long hBottomDiff = colBottom[0].mutable_gpu_diff;

        m_cuda.ReLUBackward(m_hCudnn, m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData, m_tZero, m_hBottomDesc, hBottomDiff);
    }
}
254}
The Log class provides general output in text form.
Definition: Log.cs:13
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
T m_tZero
Specifies a generic type equal to 0.0.
Definition: Layer.cs:76
T m_tOne
Specifies a generic type equal to 1.0.
Definition: Layer.cs:72
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
Definition: Layer.cs:84
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and if they are the same as the previous sizing, no reshape is needed.
Definition: Layer.cs:622
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The NeuronLayer is an interface for layers that take one blob as input (x) and produce one equally-sized blob as output (y).
Definition: NeuronLayer.cs:22
The ReLULayer computes the "Rectifier Linear Unit" ReLU non-linearity, a classic activation for neural networks.
Definition: ReLULayer.cs:27
ReLULayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ReLULayer constructor.
Definition: ReLULayer.cs:43
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs using either the Engine.CAFFE or Engine....
Definition: ReLULayer.cs:150
override void dispose()
Releases all GPU and host resources used by the Layer.
Definition: ReLULayer.cs:50
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CAFFE mode.
Definition: ReLULayer.cs:168
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs using the Engine.CUDNN mode.
Definition: ReLULayer.cs:241
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
Definition: ReLULayer.cs:97
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CUDNN mode.
Definition: ReLULayer.cs:216
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs using the Engine.CAFFE mode.
Definition: ReLULayer.cs:195
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
Definition: ReLULayer.cs:78
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
Definition: ReLULayer.cs:125
Specifies the base parameter for all layers.
ReLUParameter relu_param
Returns the parameter set when initialized with LayerType.RELU
bool use_halfsize
Specifies whether or not to use half sized memory or not.
LayerType
Specifies the layer type.
double negative_slope
Specifies the negative slope. Allow non-zero slope for negative inputs to speed up optimization.
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.
Definition: Annotation.cs:12