MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
GlobResNormLayer.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers.beta
{
    /// <summary>
    /// The GlobResNormLayer performs an L2 normalization over the input data.
    /// </summary>
    public class GlobResNormLayer<T> : Layer<T>
    {
        Blob<T> m_blobSumMultiplier;
        Blob<T> m_blobSquare;
        Blob<T> m_blobNorm;
        Blob<T> m_blobTempDot;

        /// <summary>
        /// The GlobResNormLayer constructor.
        /// </summary>
        public GlobResNormLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.GLOBRES_NORM;
            m_blobSumMultiplier = new common.Blob<T>(cuda, log, false);
            m_blobSumMultiplier.Name = m_param.name + " sum mult";
            m_blobSquare = new common.Blob<T>(cuda, log, false);
            m_blobSquare.Name = m_param.name + " sqr";
            m_blobNorm = new common.Blob<T>(cuda, log, false);
            m_blobNorm.Name = m_param.name + " norm";
            m_blobTempDot = new common.Blob<T>(cuda, log, false);
            m_blobTempDot.Name = m_param.name + " temp dot";
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            base.dispose();
            dispose(ref m_blobSumMultiplier);
            dispose(ref m_blobSquare);
            dispose(ref m_blobNorm);
            dispose(ref m_blobTempDot);
        }

        /// <summary>
        /// Derivative layers should add all internal blobs to the 'col' provided.
        /// </summary>
        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;

            col.Add(m_blobSquare);
            col.Add(m_blobNorm);
            col.Add(m_blobTempDot);
            col.Add(m_blobSumMultiplier);
        }

        /// <summary>
        /// Returns the exact number of required bottom (input) Blobs: data
        /// </summary>
        public override int ExactNumBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the exact number of required top (output) Blobs: norm
        /// </summary>
        public override int ExactNumTopBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Setup the layer.
        /// </summary>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
        }

        /// <summary>
        /// Reshape the bottom (input) and top (output) blobs.
        /// </summary>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            colTop[0].ReshapeLike(colBottom[0]);

            m_blobSumMultiplier.Reshape(1, colBottom[0].channels, 1, 1);
            m_blobSumMultiplier.SetData(1.0);
            m_blobSquare.ReshapeLike(colBottom[0]);
            m_blobNorm.Reshape(colBottom[0].num, 1, colBottom[0].height, colBottom[0].width);
            m_blobTempDot.Reshape(colBottom[0].num, 1, colBottom[0].height, colBottom[0].width);
        }

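        // Note: m_blobNorm and m_blobTempDot hold a single scalar per
        // (num, height, width) position, shared across all channels, while
        // m_blobSumMultiplier is a channel-length vector of ones.
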
        /// <summary>
        /// Computes the forward calculation.
        /// </summary>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].mutable_gpu_data;
            long hSquareData = m_blobSquare.mutable_gpu_data;
            long hNormData = m_blobNorm.mutable_gpu_data;
            int nCount = colBottom[0].count();
            int nNum = colBottom[0].num;
            int nChannels = colBottom[0].channels;
            int nSpatialDim = colBottom[0].height * colBottom[0].width;

            m_cuda.copy(nCount, hBottomData, hTopData);
            m_cuda.copy(nCount, hBottomData, hSquareData);

            // square
            m_cuda.powx(nCount, hSquareData, 2.0, hSquareData);

            // sum across channel
            m_cuda.channel_sum(nNum * nSpatialDim, nNum, nChannels, nSpatialDim, hSquareData, hNormData);

            // square root
            m_cuda.powx(nNum * nSpatialDim, hNormData, 0.5, hNormData);

            // divide
            m_cuda.channel_div(nNum * nSpatialDim, nNum, nChannels, nSpatialDim, hNormData, hTopData, 2);
        }

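        // In other words, for each item n and spatial position s the kernel
        // sequence above computes
        //     top[n, c, s] = bottom[n, c, s] / sqrt(sum_c' bottom[n, c', s]^2)
        // i.e. an L2 normalization across the channel dimension at every pixel.
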
        /// <summary>
        /// Computes the error gradient w.r.t. the inputs.
        /// </summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            long hTopDiff = colTop[0].gpu_diff;
            long hTopData = colTop[0].gpu_data;
            long hBottomDiff = colBottom[0].mutable_gpu_diff;
            long hBottomData = colBottom[0].gpu_data;
            long hNormData = m_blobNorm.gpu_data;
            long hTempDotData = m_blobTempDot.mutable_gpu_data;
            long hTempData = m_blobSquare.mutable_gpu_data;
            int nNum = colTop[0].num;
            int nChannels = colTop[0].channels;
            int nSpatialDim = colTop[0].height * colTop[0].width;
            int nCount = colTop[0].count();

            m_cuda.copy(nCount, hTopDiff, hBottomDiff);
            m_cuda.copy(nCount, hBottomData, hTempData);

            // b_diff = t_diff / norm - dot(t_diff, t_data) / (norm)^2 * bottom_data
            // temp_dot_data = dot(t_diff, t_data)
            m_cuda.channel_dot(nNum * nSpatialDim, nNum, nChannels, nSpatialDim, hTopDiff, hTopData, hTempDotData);

            // temp_dot_data /= (norm)^2
            m_cuda.div(nNum * nSpatialDim, hTempDotData, hNormData, hTempDotData);
            m_cuda.div(nNum * nSpatialDim, hTempDotData, hNormData, hTempDotData);

            // bottom_diff = top_diff, bottom_diff /= norm
            m_cuda.channel_div(nNum * nSpatialDim, nNum, nChannels, nSpatialDim, hNormData, hBottomDiff, 2);

            // temp_data = bottom_data, temp_data *= temp_dot_data
            m_cuda.channel_mul(nNum * nSpatialDim, nNum, nChannels, nSpatialDim, hTempDotData, hTempData, 2);

            // bottom_diff += -temp_data
            m_cuda.axpy(nCount, -1.0, hTempData, hBottomDiff);
        }
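
        // Derivation: with y = x / ||x|| taken across channels at each pixel
        // and g = top_diff, the chain rule gives
        //     bottom_diff = g / ||x|| - (g . y) / ||x||^2 * x
        // channel_dot computes (g . y), the two div calls apply 1 / ||x||^2,
        // channel_div applies 1 / ||x|| to the copied top diff, and the final
        // axpy subtracts the second term.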
    }
}
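
For readers who want to check the arithmetic outside of MyCaffe, here is a minimal CPU sketch of the forward normalization, written against the same (num, channels, height * width) blob layout. The GrnReference class and its Forward helper are illustrative names only and are not part of the MyCaffe API.

using System;

// Minimal CPU sketch of the forward math in GlobResNormLayer (illustrative only).
// Blob layout: index = (n * channels + c) * spatialDim + s.
class GrnReference
{
    static double[] Forward(double[] bottom, int num, int channels, int spatialDim)
    {
        double[] top = new double[bottom.Length];

        for (int n = 0; n < num; n++)
        {
            for (int s = 0; s < spatialDim; s++)
            {
                // Sum of squares across the channel dimension at this pixel.
                double sumSq = 0;
                for (int c = 0; c < channels; c++)
                {
                    double v = bottom[(n * channels + c) * spatialDim + s];
                    sumSq += v * v;
                }

                // Divide each channel value by the per-pixel L2 norm.
                double norm = Math.Sqrt(sumSq);
                for (int c = 0; c < channels; c++)
                {
                    int i = (n * channels + c) * spatialDim + s;
                    top[i] = bottom[i] / norm;
                }
            }
        }

        return top;
    }

    static void Main()
    {
        // One item, two channels, one spatial position: (3, 4) -> (0.6, 0.8).
        double[] top = Forward(new double[] { 3.0, 4.0 }, 1, 2, 1);
        Console.WriteLine(string.Join(", ", top));
    }
}

Note that, like the GPU path in the layer, this sketch adds no epsilon to the norm, so an all-zero channel vector at any pixel produces a division by zero.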