MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
ELULayer.cs
1 using System;
2 using System.Collections.Generic;
3 using System.Linq;
4 using System.Text;
5 using MyCaffe.basecode;
6 using MyCaffe.common;
7 using MyCaffe.param;
8
9 namespace MyCaffe.layers
10 {
26 public class ELULayer<T> : NeuronLayer<T>
27 {
28 long m_hCudnn = 0;
29 long m_hBottomDesc = 0;
30 long m_hTopDesc = 0;
31
42 public ELULayer(CudaDnn<T> cuda, Log log, LayerParameter p)
43 : base(cuda, log, p)
44 {
45 m_type = LayerParameter.LayerType.ELU;
46 }
47
49 protected override void dispose()
50 {
51 if (m_hBottomDesc != 0)
52 {
53 m_cuda.FreeTensorDesc(m_hBottomDesc);
54 m_hBottomDesc = 0;
55 }
56
57 if (m_hTopDesc != 0)
58 {
59 m_cuda.FreeTensorDesc(m_hTopDesc);
60 m_hTopDesc = 0;
61 }
62
63 if (m_hCudnn != 0)
64 {
65 m_cuda.FreeCuDNN(m_hCudnn);
66 m_hCudnn = 0;
67 }
68
69 base.dispose();
70 }
71
77 public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
78 {
79 if (!m_param.elu_param.useCudnn())
80 return;
81
82 // Initialize CuDNN
83 m_hCudnn = m_cuda.CreateCuDNN();
84 m_hBottomDesc = m_cuda.CreateTensorDesc();
85 m_hTopDesc = m_cuda.CreateTensorDesc();
86 }
87
93 public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
94 {
95 base.Reshape(colBottom, colTop);
96 if (!reshapeNeeded(colBottom, colTop, false))
97 return;
98
99 if (!m_param.elu_param.useCudnn())
100 return;
101
102 int nN = colBottom[0].num;
103 int nK = colBottom[0].channels;
104 int nH = colBottom[0].height;
105 int nW = colBottom[0].width;
106
107 m_cuda.SetTensorDesc(m_hBottomDesc, nN, nK, nH, nW);
108 m_cuda.SetTensorDesc(m_hTopDesc, nN, nK, nH, nW);
109 }
110
122 protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
123 {
124 if (!m_param.elu_param.useCudnn())
125 forward_cuda(colBottom, colTop);
126 else
127 forward_cudnn(colBottom, colTop);
128 }
129
149 protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
150 {
151 if (!m_param.elu_param.useCudnn())
152 backward_cuda(colTop, rgbPropagateDown, colBottom);
153 else
154 backward_cudnn(colTop, rgbPropagateDown, colBottom);
155 }
156
157
178 protected void forward_cuda(BlobCollection<T> colBottom, BlobCollection<T> colTop)
179 {
180 int nCount = colTop[0].count();
181 long hBottomData = colBottom[0].gpu_data;
182 long hTopData = colTop[0].mutable_gpu_data;
183 double dfAlpha = m_param.elu_param.alpha;
184
185 m_cuda.elu_fwd(nCount, hBottomData, hTopData, dfAlpha);
186 }
187
207 protected void backward_cuda(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
208 {
209 if (!rgbPropagateDown[0])
210 return;
211
212 int nCount = colTop[0].count();
213 long hTopDiff = colTop[0].gpu_diff;
214 long hTopData = colTop[0].gpu_data;
215 long hBottomData = colBottom[0].gpu_data;
216 long hBottomDiff = colBottom[0].mutable_gpu_diff;
217 double dfAlpha = m_param.elu_param.alpha;
218
219 m_cuda.elu_bwd(nCount, hTopDiff, hTopData, hBottomData, hBottomDiff, dfAlpha);
220 }
221
242 protected void forward_cudnn(BlobCollection<T> colBottom, BlobCollection<T> colTop)
243 {
244 long hBottomData = colBottom[0].gpu_data;
245 long hTopData = colTop[0].mutable_gpu_data;
246
247 m_cuda.EluForward(m_hCudnn, m_tOne, m_hBottomDesc, hBottomData, m_tZero, m_hTopDesc, hTopData);
248 }
249
269 protected void backward_cudnn(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
270 {
271 if (!rgbPropagateDown[0])
272 return;
273
274 long hTopData = colTop[0].gpu_data;
275 long hTopDiff = colTop[0].gpu_diff;
276 long hBottomData = colBottom[0].gpu_data;
277 long hBottomDiff = colBottom[0].mutable_gpu_diff;
278
279 m_cuda.EluBackward(m_hCudnn, m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData, m_tZero, m_hBottomDesc, hBottomDiff);
280 }
281 }
282}
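For reference, the sketch below shows the elementwise math that the Engine.CAFFE path above delegates to m_cuda.elu_fwd and m_cuda.elu_bwd. It is only an illustrative CPU loop using the standard ELU definition; the actual kernels run inside the Low-Level Cuda C++ DLL, and the class and method names here (EluReferenceMath, Forward, Backward) are hypothetical and not part of ELULayer.cs.

using System;

// Illustrative CPU reference only (a sketch for clarity, not part of ELULayer.cs):
// the elementwise ELU math that m_cuda.elu_fwd / m_cuda.elu_bwd are expected to
// apply on the GPU, written as plain C# loops.
internal static class EluReferenceMath
{
    // Forward: y = x for x > 0; y = alpha * (exp(x) - 1) for x <= 0.
    public static void Forward(float[] rgBottomData, float[] rgTopData, double dfAlpha)
    {
        for (int i = 0; i < rgBottomData.Length; i++)
        {
            float fX = rgBottomData[i];
            rgTopData[i] = (fX > 0) ? fX : (float)(dfAlpha * (Math.Exp(fX) - 1.0));
        }
    }

    // Backward: dL/dx = dL/dy for x > 0; dL/dx = dL/dy * (y + alpha) for x <= 0.
    // The top data y is re-used because y + alpha = alpha * exp(x) when x <= 0,
    // so exp(x) does not need to be recomputed.
    public static void Backward(float[] rgTopDiff, float[] rgTopData, float[] rgBottomData,
                                float[] rgBottomDiff, double dfAlpha)
    {
        for (int i = 0; i < rgBottomData.Length; i++)
        {
            rgBottomDiff[i] = (rgBottomData[i] > 0)
                ? rgTopDiff[i]
                : (float)(rgTopDiff[i] * (rgTopData[i] + dfAlpha));
        }
    }
}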
The Log class provides general output in text form.
Definition: Log.cs:13
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
The ELULayer computes the exponential linear unit non-linearity, y = x for x > 0 and y = alpha * (exp(x) - 1) for x <= 0. This layer is initialized with the MyCaffe.param.EluParameter.
Definition: ELULayer.cs:27
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
Definition: ELULayer.cs:122
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation using cuDNN.
Definition: ELULayer.cs:242
override void dispose()
Releases all GPU and host resources used by the Layer.
Definition: ELULayer.cs:49
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the ELU value inputs.
Definition: ELULayer.cs:269
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs using either the Engine.CAFFE or Engine.CUDNN mode.
Definition: ELULayer.cs:149
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
Definition: ELULayer.cs:77
ELULayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ELULayer constructor
Definition: ELULayer.cs:42
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation using Cuda.
Definition: ELULayer.cs:178
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the ELU value inputs.
Definition: ELULayer.cs:207
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
Definition: ELULayer.cs:93
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
T m_tZero
Specifies a generic type equal to 0.0.
Definition: Layer.cs:76
T m_tOne
Specifies a generic type equal to 1.0.
Definition: Layer.cs:72
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and if they are the same as the previous sizing,...
Definition: Layer.cs:622
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only one equally-sized blob as output (y).
Definition: NeuronLayer.cs:22
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
Definition: EluParameter.cs:48
double alpha
Described in Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) by Clevert, Unterthiner, and Hochreiter (2015).
Definition: EluParameter.cs:64
Specifies the base parameter for all layers.
EluParameter elu_param
Returns the parameter set when initialized with LayerType.ELU
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.
Definition: Annotation.cs:12
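As a usage note, the engine selection in forward/backward above is driven entirely by the layer parameter. The following is a hedged configuration sketch: only elu_param, alpha, useCudnn() and LayerType.ELU are confirmed by the documentation above, while the LayerParameter(LayerType) constructor overload and the wrapper class/method names are assumptions for illustration.

using MyCaffe.param;

// Hypothetical configuration sketch (the class/method names are illustrative, and the
// LayerParameter constructor overload is an assumption about the installed MyCaffe version).
internal static class EluLayerSetupExample
{
    public static LayerParameter BuildEluParameter()
    {
        LayerParameter p = new LayerParameter(LayerParameter.LayerType.ELU);
        p.elu_param.alpha = 1.0;   // alpha from Clevert et al.; 1.0 is a common default.

        // ELULayer.forward/backward branch on p.elu_param.useCudnn():
        //   false -> Engine.CAFFE path: m_cuda.elu_fwd / m_cuda.elu_bwd
        //   true  -> Engine.CUDNN path: m_cuda.EluForward / m_cuda.EluBackward, using the
        //            cuDNN handle and tensor descriptors created in LayerSetUp and Reshape.
        return p;
    }
}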