MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
TanhLayer.cs
1using System;
2using System.Collections.Generic;
3using System.Linq;
4using System.Text;
5using MyCaffe.basecode;
6using MyCaffe.common;
7using MyCaffe.param;
8
9namespace MyCaffe.layers
10{
27 public class TanhLayer<T> : NeuronLayer<T>
28 {
29 long m_hCudnn = 0;
30 long m_hBottomDesc = 0;
31 long m_hTopDesc = 0;
32
33
43 public TanhLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
44 : base(cuda, log, p)
45 {
46 m_type = LayerParameter.LayerType.TANH;
47 }
48
50 protected override void dispose()
51 {
52 if (m_hBottomDesc != 0)
53 {
54 m_cuda.FreeTensorDesc(m_hBottomDesc);
55 m_hBottomDesc = 0;
56 }
57
58 if (m_hTopDesc != 0)
59 {
60 m_cuda.FreeTensorDesc(m_hTopDesc);
61 m_hTopDesc = 0;
62 }
63
64 if (m_hCudnn != 0)
65 {
66 m_cuda.FreeCuDNN(m_hCudnn);
67 m_hCudnn = 0;
68 }
69
70 base.dispose();
71 }
72
78 public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
79 {
80 if (!m_param.tanh_param.useCudnn())
81 return;
82
83 // Setup the convert to half flags used by the Layer just before calling forward and backward.
84 m_bUseHalfSize = m_param.use_halfsize;
85
86 // Initialize CuDNN
87 m_hCudnn = m_cuda.CreateCuDNN();
88 m_hBottomDesc = m_cuda.CreateTensorDesc();
89 m_hTopDesc = m_cuda.CreateTensorDesc();
90 }
91
97 public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
98 {
99 base.Reshape(colBottom, colTop);
100 if (!reshapeNeeded(colBottom, colTop, false))
101 return;
102
103 if (!m_param.tanh_param.useCudnn())
104 return;
105
106 int nN = colBottom[0].num;
107 int nK = colBottom[0].channels;
108 int nH = colBottom[0].height;
109 int nW = colBottom[0].width;
110
111 m_cuda.SetTensorDesc(m_hBottomDesc, nN, nK, nH, nW, m_bUseHalfSize);
112 m_cuda.SetTensorDesc(m_hTopDesc, nN, nK, nH, nW, m_bUseHalfSize);
113 }
114
126 protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
127 {
128 if (!m_param.tanh_param.useCudnn())
129 forward_cuda(colBottom, colTop);
130 else
131 forward_cudnn(colBottom, colTop);
132 }
133
153 protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
154 {
155 if (!m_param.tanh_param.useCudnn())
156 backward_cuda(colTop, rgbPropagateDown, colBottom);
157 else
158 backward_cudnn(colTop, rgbPropagateDown, colBottom);
159 }
160
172 protected void forward_cuda(BlobCollection<T> colBottom, BlobCollection<T> colTop)
173 {
174 long hBottomData = colBottom[0].gpu_data;
175 long hTopData = colTop[0].mutable_gpu_data;
176 int nCount = colBottom[0].count();
177
178 m_cuda.tanh_fwd(nCount, hBottomData, hTopData);
179 }
180
200 protected void backward_cuda(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
201 {
202 long hTopData = colTop[0].gpu_data;
203 long hTopDiff = colTop[0].gpu_diff;
204 long hBottomDiff = colBottom[0].mutable_gpu_diff;
205 int nCount = colBottom[0].count();
206
207 m_cuda.tanh_bwd(nCount, hTopDiff, hTopData, hBottomDiff);
208 }
209
221 protected void forward_cudnn(BlobCollection<T> colBottom, BlobCollection<T> colTop)
222 {
223 long hBottomData = colBottom[0].gpu_data;
224 long hTopData = colTop[0].mutable_gpu_data;
225
226 m_cuda.TanhForward(m_hCudnn, m_tOne, m_hBottomDesc, hBottomData, m_tZero, m_hTopDesc, hTopData);
227 }
228
248 protected void backward_cudnn(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
249 {
250 if (!rgbPropagateDown[0])
251 return;
252
253 long hTopData = colTop[0].gpu_data;
254 long hTopDiff = colTop[0].gpu_diff;
255 long hBottomData = colBottom[0].gpu_data;
256 long hBottomDiff = colBottom[0].mutable_gpu_diff;
257
258 m_cuda.TanhBackward(m_hCudnn, m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData, m_tZero, m_hBottomDesc, hBottomDiff);
259 }
260 }
261}
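As a point of reference, the sketch below shows one way the TanhLayer might be exercised on its own. It is a minimal example, not part of TanhLayer.cs; the device id, blob shape, log name and the helper calls SetData/SetDiff are illustrative assumptions about the surrounding MyCaffe API rather than a prescribed usage pattern.

using System.Collections.Generic;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;
using MyCaffe.layers;

// Connect to CUDA device 0 and create a simple log (both values are arbitrary here).
CudaDnn<float> cuda = new CudaDnn<float>(0);
Log log = new Log("tanh_example");

// A TANH layer parameter; tanh_param.engine is assumed to select Engine.CAFFE vs. Engine.CUDNN.
LayerParameter p = new LayerParameter(LayerParameter.LayerType.TANH);
TanhLayer<float> layer = new TanhLayer<float>(cuda, log, p);

// One bottom blob and one top blob; Reshape sizes the top to match the bottom.
Blob<float> bottom = new Blob<float>(cuda, log, 1, 3, 2, 2);
Blob<float> top = new Blob<float>(cuda, log);
BlobCollection<float> colBottom = new BlobCollection<float>();
BlobCollection<float> colTop = new BlobCollection<float>();
colBottom.Add(bottom);
colTop.Add(top);

bottom.SetData(0.5);                 // fill the input with a constant test value.
layer.Setup(colBottom, colTop);      // creates the cuDNN handle and tensor descriptors when cuDNN is used.
layer.Forward(colBottom, colTop);    // top = tanh(bottom)

top.SetDiff(1.0);                    // stand-in gradient from the layer above.
layer.Backward(colTop, new List<bool>() { true }, colBottom);

layer.Dispose();                     // releases the cuDNN handle and tensor descriptors (see dispose() above).
bottom.Dispose();
top.Dispose();
cuda.Dispose();

Note that in Engine.CAFFE mode the backward pass only needs the top data and top diff, because d tanh(x)/dx = 1 - tanh(x)^2, so tanh_bwd can form bottom_diff = top_diff * (1 - top_data * top_data) without re-reading the bottom data; the cuDNN path also passes the bottom data because cudnnActivationBackward expects it.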
The Log class provides general output in text form.
Definition: Log.cs:13
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
T m_tZero
Specifies a generic type equal to 0.0.
Definition: Layer.cs:76
T m_tOne
Specifies a generic type equal to 1.0.
Definition: Layer.cs:72
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
Definition: Layer.cs:84
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and if they are the same as the previous sizing,...
Definition: Layer.cs:622
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-s...
Definition: NeuronLayer.cs:22
The TanhLayer is a neuron layer that calculates the tanh function, popular with auto-encoders....
Definition: TanhLayer.cs:28
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CUDNN mode.
Definition: TanhLayer.cs:221
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs using either the Engine.CAFFE or Engine....
Definition: TanhLayer.cs:153
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CAFFE mode.
Definition: TanhLayer.cs:172
override void dispose()
Releases all GPU and host resources used by the Layer.
Definition: TanhLayer.cs:50
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
Definition: TanhLayer.cs:97
TanhLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The TanhLayer constructor.
Definition: TanhLayer.cs:43
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the tanh inputs using the Engine.CAFFE mode.
Definition: TanhLayer.cs:200
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
Definition: TanhLayer.cs:78
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
Definition: TanhLayer.cs:126
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the tanh inputs using the Engine.CUDNN mode.
Definition: TanhLayer.cs:248
Specifies the base parameter for all layers.
bool use_halfsize
Specifies whether or not to use half sized memory.
LayerType
Specifies the layer type.
TanhParameter tanh_param
Returns the parameter set when initialized with LayerType.TANH
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...
Definition: Annotation.cs:12