MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
LRNLayer.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers
{
    /// <summary>
    /// The "Local Response Normalization" LRNLayer is used to normalize the input in a local region,
    /// either across channels or within a channel.
    /// </summary>
    public class LRNLayer<T> : Layer<T>
    {
        int m_nSize;
        int m_nPrePad;
        double m_dfAlpha;
        double m_dfBeta;
        double m_dfK;
        int m_nNum;
        int m_nChannels;
        int m_nHeight;
        int m_nWidth;

        // Fields used for normalization ACROSS_CHANNELS
        // scale_ stores the intermediate summing results.
        Blob<T> m_blobScale;

        // Fields used for normalization WITHIN_CHANNEL
        SplitLayer<T> m_splitLayer;
        BlobCollection<T> m_colSplitTopVec = new BlobCollection<T>();
        PowerLayer<T> m_squareLayer;
        Blob<T> m_blobSquareInput;
        Blob<T> m_blobSquareOutput;
        BlobCollection<T> m_colSquareBottomVec = new BlobCollection<T>();
        BlobCollection<T> m_colSquareTopVec = new BlobCollection<T>();
        PoolingLayer<T> m_poolLayer;
        Blob<T> m_blobPoolOutput;
        BlobCollection<T> m_colPoolTopVec = new BlobCollection<T>();
        PowerLayer<T> m_powerLayer;
        Blob<T> m_blobPowerOutput;
        BlobCollection<T> m_colPowerTopVec = new BlobCollection<T>();
        EltwiseLayer<T> m_productLayer;
        Blob<T> m_blobProductInput;
        BlobCollection<T> m_colProductBottomVec = new BlobCollection<T>();

        // cuDnn - lrn
        long m_hCuDnn = 0;
        long m_hNormDesc = 0;
        long m_hBottomDesc = 0;
        long m_hTopDesc = 0;

        // cuDnn - lcn
        int m_nTempDataSize;
        long m_hTempData1;
        long m_hTempData2;

        /// <summary>
        /// The LRNLayer constructor.
        /// </summary>
        public LRNLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.LRN;
            m_blobScale = new Blob<T>(cuda, log);
            m_blobScale.Name = m_param.name + " scale";
            m_blobSquareInput = new Blob<T>(cuda, log);
            m_blobSquareInput.Name = m_param.name + " sqin";
            m_blobSquareOutput = new Blob<T>(cuda, log);
            m_blobSquareOutput.Name = m_param.name + " sqout";
            m_blobPoolOutput = new Blob<T>(cuda, log);
            m_blobPoolOutput.Name = m_param.name + " poolout";
            m_blobPowerOutput = new Blob<T>(cuda, log);
            m_blobPowerOutput.Name = m_param.name + " powout";
            m_blobProductInput = new Blob<T>(cuda, log);
            m_blobProductInput.Name = m_param.name + " prodin";
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            m_blobScale.Dispose();
            m_blobSquareInput.Dispose();
            m_blobSquareOutput.Dispose();
            m_blobPoolOutput.Dispose();
            m_blobPowerOutput.Dispose();
            m_blobProductInput.Dispose();

            if (m_splitLayer != null)
            {
                m_splitLayer.Dispose();
                m_splitLayer = null;
            }

            if (m_squareLayer != null)
            {
                m_squareLayer.Dispose();
                m_squareLayer = null;
            }

            if (m_poolLayer != null)
            {
                m_poolLayer.Dispose();
                m_poolLayer = null;
            }

            if (m_powerLayer != null)
            {
                m_powerLayer.Dispose();
                m_powerLayer = null;
            }

            if (m_productLayer != null)
            {
                m_productLayer.Dispose();
                m_productLayer = null;
            }

            if (m_hNormDesc != 0)
            {
                m_cuda.FreeLRNDesc(m_hNormDesc);
                m_hNormDesc = 0;
            }

            if (m_hBottomDesc != 0)
            {
                m_cuda.FreeTensorDesc(m_hBottomDesc);
                m_hBottomDesc = 0;
            }

            if (m_hTopDesc != 0)
            {
                m_cuda.FreeTensorDesc(m_hTopDesc);
                m_hTopDesc = 0;
            }

            if (m_hCuDnn != 0)
            {
                m_cuda.FreeCuDNN(m_hCuDnn);
                m_hCuDnn = 0;
            }

            if (m_hTempData1 != 0)
            {
                m_cuda.FreeMemory(m_hTempData1);
                m_hTempData1 = 0;
            }

            if (m_hTempData2 != 0)
            {
                m_cuda.FreeMemory(m_hTempData2);
                m_hTempData2 = 0;
            }

            base.dispose();
        }

        /// <summary>
        /// Derivative layers should add all internal blobs to the 'col' provided.
        /// </summary>
        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;

            // Internal blobs are only used by the Engine.CAFFE path.
            if (!m_param.lrn_param.useCudnn())
            {
                if (m_param.lrn_param.norm_region == LRNParameter.NormRegion.ACROSS_CHANNELS)
                {
                    col.Add(m_blobScale);
                }
                else
                {
                    col.Add(m_blobSquareInput);
                    col.Add(m_blobSquareOutput);
                    col.Add(m_blobPoolOutput);
                    col.Add(m_blobPowerOutput);
                    col.Add(m_blobProductInput);
                }
            }
        }

        /// <summary>
        /// Returns the exact number of required bottom (input) Blobs: input
        /// </summary>
        public override int ExactNumBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the exact number of required top (output) Blobs: lrn
        /// </summary>
        public override int ExactNumTopBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Setup the layer for both Engine.CUDNN and Engine.CAFFE modes.
        /// </summary>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_nSize = (int)m_param.lrn_param.local_size;
            m_log.CHECK_EQ(m_nSize % 2, 1, "LRN only supports odd values for local_size.");
            m_nPrePad = (m_nSize - 1) / 2;
            m_dfAlpha = m_param.lrn_param.alpha;
            m_dfBeta = m_param.lrn_param.beta;
            m_dfK = m_param.lrn_param.k;

            if (m_param.lrn_param.norm_region == LRNParameter.NormRegion.WITHIN_CHANNEL)
            {
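                // A brief sketch of the WITHIN_CHANNEL pipeline assembled by the setup calls below:
                //   bottom -> split -> square (power) -> pool -> power -> element-wise product
                //   with the original input -> top.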
                // Set up split_layer to use in the numerator and denominator.
                m_colSplitTopVec = new BlobCollection<T>();
                m_colSplitTopVec.Add(m_blobProductInput);
                m_colSplitTopVec.Add(m_blobSquareInput);
                LayerParameter split_param = new LayerParameter(LayerParameter.LayerType.SPLIT, "split");
                m_splitLayer = new SplitLayer<T>(m_cuda, m_log, split_param);
                m_splitLayer.Setup(colBottom, m_colSplitTopVec);

                // Set up square_layer to square the inputs.
                m_colSquareBottomVec = new BlobCollection<T>();
                m_colSquareTopVec = new BlobCollection<T>();
                m_colSquareBottomVec.Add(m_blobSquareInput);
                m_colSquareTopVec.Add(m_blobSquareOutput);
                LayerParameter square_param = new LayerParameter(LayerParameter.LayerType.POWER, "square");
                square_param.power_param.power = 2.0;
                m_squareLayer = new PowerLayer<T>(m_cuda, m_log, square_param);
                m_squareLayer.Setup(m_colSquareBottomVec, m_colSquareTopVec);

                // Set up pool_layer to sum over square neighborhoods of the input.
                m_colPoolTopVec = new BlobCollection<T>();
                m_colPoolTopVec.Add(m_blobPoolOutput);
                LayerParameter pool_param = new LayerParameter(LayerParameter.LayerType.POOLING, "pool");
                pool_param.pooling_param.pool = PoolingParameter.PoolingMethod.AVE;
                pool_param.pooling_param.pad.Add((uint)m_nPrePad);
                pool_param.pooling_param.kernel_size.Add((uint)m_nSize);
                m_poolLayer = new PoolingLayer<T>(m_cuda, m_log, pool_param);
                m_poolLayer.Setup(m_colSquareTopVec, m_colPoolTopVec);

                // Set up power_layer to compute (1 + alpha/N^2 s)^-beta, where s is
                // the sum of the squared neighborhood (the output of pool_layer).
                m_colPowerTopVec = new BlobCollection<T>();
                m_colPowerTopVec.Add(m_blobPowerOutput);
                LayerParameter power_param = new LayerParameter(LayerParameter.LayerType.POWER, "power");
                power_param.power_param.power = -m_dfBeta;
                power_param.power_param.scale = m_dfAlpha;
                power_param.power_param.shift = 1.0;
                m_powerLayer = new PowerLayer<T>(m_cuda, m_log, power_param);
                m_powerLayer.Setup(m_colPoolTopVec, m_colPowerTopVec);

                // Set up a product_layer to compute outputs by multiplying inputs by the
                // inverse denominator computed by the power layer.
                m_colProductBottomVec = new BlobCollection<T>();
                m_colProductBottomVec.Add(m_blobProductInput);
                m_colProductBottomVec.Add(m_blobPowerOutput);
                LayerParameter product_param = new LayerParameter(LayerParameter.LayerType.ELTWISE, "product");
                product_param.eltwise_param.operation = EltwiseParameter.EltwiseOp.PROD;
                m_productLayer = new EltwiseLayer<T>(m_cuda, m_log, product_param);
                m_productLayer.Setup(m_colProductBottomVec, colTop);
            }

            if (!m_param.lrn_param.useCudnn())
                return;

            // Setup the convert to half flags used by the Layer just before calling forward and backward.
            m_bUseHalfSize = m_param.use_halfsize;

            m_hCuDnn = m_cuda.CreateCuDNN();
            m_hNormDesc = m_cuda.CreateLRNDesc();
            m_hBottomDesc = m_cuda.CreateTensorDesc();
            m_hTopDesc = m_cuda.CreateTensorDesc();
        }

        /// <summary>
        /// Reshape the bottom (input) and top (output) blobs.
        /// </summary>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_log.CHECK_EQ(4, colBottom[0].num_axes, "Input must have 4 axes, corresponding to (num, channels, height, width)");
            m_nNum = colBottom[0].num;
            m_nChannels = colBottom[0].channels;
            m_nHeight = colBottom[0].height;
            m_nWidth = colBottom[0].width;

            switch (m_param.lrn_param.norm_region)
            {
                case LRNParameter.NormRegion.ACROSS_CHANNELS:
                    colTop[0].Reshape(m_nNum, m_nChannels, m_nHeight, m_nWidth);
                    m_blobScale.Reshape(m_nNum, m_nChannels, m_nHeight, m_nWidth);
                    break;

                case LRNParameter.NormRegion.WITHIN_CHANNEL:
                    m_splitLayer.Reshape(colBottom, m_colSplitTopVec);
                    m_squareLayer.Reshape(m_colSquareBottomVec, m_colSquareTopVec);
                    m_poolLayer.Reshape(m_colSquareTopVec, m_colPoolTopVec);
                    m_powerLayer.Reshape(m_colPoolTopVec, m_colPowerTopVec);
                    m_productLayer.Reshape(m_colProductBottomVec, colTop);
                    break;
            }

            if (!m_param.lrn_param.useCudnn())
                return;

            m_cuda.SetTensorDesc(m_hBottomDesc, m_nNum, m_nChannels, m_nHeight, m_nWidth);
            m_cuda.SetTensorDesc(m_hTopDesc, m_nNum, m_nChannels, m_nHeight, m_nWidth);
            m_cuda.SetLRNDesc(m_hNormDesc, (uint)m_nSize, m_dfAlpha, m_dfBeta, m_dfK);

            // The cuDNN WITHIN_CHANNEL (LCN) path requires two temporary GPU buffers.
            if (m_param.lrn_param.norm_region == LRNParameter.NormRegion.WITHIN_CHANNEL)
            {
                int nTotalSize = m_nNum * m_nChannels * m_nHeight * m_nWidth;

                if (nTotalSize > m_nTempDataSize)
                {
                    if (m_hTempData1 != 0)
                    {
                        m_cuda.FreeMemory(m_hTempData1);
                        m_hTempData1 = 0;
                    }

                    if (m_hTempData2 != 0)
                    {
                        m_cuda.FreeMemory(m_hTempData2);
                        m_hTempData2 = 0;
                    }

                    m_hTempData1 = m_cuda.AllocMemory(nTotalSize);
                    m_hTempData2 = m_cuda.AllocMemory(nTotalSize);
                    m_nTempDataSize = nTotalSize;
                }
            }
        }

        /// <summary>
        /// Forward computation using either the Engine.CUDNN or Engine.CAFFE mode depending on the engine parameter.
        /// </summary>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            if (!m_param.lrn_param.useCudnn())
                forward_cuda(colBottom, colTop);
            else
                forward_cudnn(colBottom, colTop);
        }

        /// <summary>
        /// Computes the error gradient w.r.t. the inputs using either the Engine.CUDNN or Engine.CAFFE mode depending on the engine parameter.
        /// </summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (!m_param.lrn_param.useCudnn())
                backward_cuda(colTop, rgbPropagateDown, colBottom);
            else
                backward_cudnn(colTop, rgbPropagateDown, colBottom);
        }

        /// <summary>
        /// Forward computation using the Engine.CAFFE mode.
        /// </summary>
        protected void forward_cuda(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            switch (m_param.lrn_param.norm_region)
            {
                case LRNParameter.NormRegion.ACROSS_CHANNELS:
                    CrossChannelForward(colBottom, colTop);
                    break;

                case LRNParameter.NormRegion.WITHIN_CHANNEL:
                    WithinChannelForward(colBottom, colTop);
                    break;

                default:
                    m_log.FAIL("Unknown normalization region.");
                    break;
            }
        }

        /// <summary>
        /// Computes the error gradient w.r.t. the inputs using the Engine.CAFFE mode.
        /// </summary>
        protected void backward_cuda(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            switch (m_param.lrn_param.norm_region)
            {
                case LRNParameter.NormRegion.ACROSS_CHANNELS:
                    CrossChannelBackward(colTop, rgbPropagateDown, colBottom);
                    break;

                case LRNParameter.NormRegion.WITHIN_CHANNEL:
                    WithinChannelBackward(colTop, rgbPropagateDown, colBottom);
                    break;

                default:
                    m_log.FAIL("Unknown normalization region.");
                    break;
            }
        }

        /// <summary>
        /// Forward computation using the Engine.CUDNN mode.
        /// </summary>
        protected void forward_cudnn(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].mutable_gpu_data;

            if (m_param.lrn_param.norm_region == LRNParameter.NormRegion.WITHIN_CHANNEL)
                m_cuda.DivisiveNormalizationForward(m_hCuDnn, m_hNormDesc, m_tOne, m_hBottomDesc, hBottomData, m_hTempData1, m_hTempData2, m_tZero, m_hTopDesc, hTopData);
            else
                m_cuda.LRNCrossChannelForward(m_hCuDnn, m_hNormDesc, m_tOne, m_hBottomDesc, hBottomData, m_tZero, m_hTopDesc, hTopData);
        }

        /// <summary>
        /// Computes the error gradient w.r.t. the inputs using the Engine.CUDNN mode.
        /// </summary>
        protected void backward_cudnn(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            long hTopDiff = colTop[0].gpu_diff;
            long hTopData = colTop[0].gpu_data;
            long hBottomData = colBottom[0].gpu_data;
            long hBottomDiff = colBottom[0].mutable_gpu_diff;

            if (m_param.lrn_param.norm_region == LRNParameter.NormRegion.WITHIN_CHANNEL)
                m_cuda.DivisiveNormalizationBackward(m_hCuDnn, m_hNormDesc, m_tOne, m_hBottomDesc, hBottomData, hTopDiff, m_hTempData1, m_hTempData2, m_tZero, m_hBottomDesc, hBottomDiff);
            else
                m_cuda.LRNCrossChannelBackward(m_hCuDnn, m_hNormDesc, m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData, m_tZero, m_hBottomDesc, hBottomDiff);
        }

        private void CrossChannelForward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            // First, compute scale
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].mutable_gpu_data;
            long hScaleData = m_blobScale.mutable_gpu_data;

            // We will launch one kernel for each pixel location, and have the kernel
            // go through all of the channels.
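            // A sketch of what the two kernel calls below compute for each bottom element x_i,
            // following the standard Caffe LRN formulation (see the arguments passed in):
            //   scale_i = k + (alpha / size) * sum of x_j^2 over the local channel window
            //   top_i   = x_i * scale_i^(-beta)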
            int nThreads = m_nNum * m_nHeight * m_nWidth;
            m_cuda.lrn_fillscale(nThreads, hBottomData, m_nNum, m_nChannels, m_nHeight, m_nWidth, m_nSize, convert(m_dfAlpha / m_nSize), convert(m_dfK), hScaleData);

            nThreads = colBottom[0].count();
            m_cuda.lrn_computeoutput(nThreads, hBottomData, hScaleData, convert(-m_dfBeta), hTopData);
        }

        private void WithinChannelForward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_splitLayer.Forward(colBottom, m_colSplitTopVec);
            m_squareLayer.Forward(m_colSquareBottomVec, m_colSquareTopVec);
            m_poolLayer.Forward(m_colSquareTopVec, m_colPoolTopVec);
            m_powerLayer.Forward(m_colPoolTopVec, m_colPowerTopVec);
            m_productLayer.Forward(m_colProductBottomVec, colTop);
        }

        private void CrossChannelBackward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            int nThreads = m_nNum * m_nHeight * m_nWidth;
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].gpu_data;
            long hScaleData = m_blobScale.gpu_data;
            long hTopDiff = colTop[0].gpu_diff;
            long hBottomDiff = colBottom[0].mutable_gpu_diff;

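            // A sketch of the gradient the kernel below computes, per the standard Caffe LRN backward:
            //   bottom_diff_i = top_diff_i * scale_i^(-beta)
            //                   - (2 * alpha * beta / size) * bottom_i
            //                     * sum over the local channel window of (top_diff_j * top_j / scale_j)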
            m_cuda.lrn_computediff(nThreads, hBottomData, hTopData, hScaleData, hTopDiff, m_nNum, m_nChannels, m_nHeight, m_nWidth, m_nSize, convert(-m_dfBeta), convert(2.0 * m_dfAlpha * m_dfBeta / m_nSize), hBottomDiff);
        }

        private void WithinChannelBackward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (rgbPropagateDown[0])
            {
                List<bool> rgbProductPropagateDown = Utility.Create<bool>(2, true);
                m_productLayer.Backward(colTop, rgbProductPropagateDown, m_colProductBottomVec);
                m_powerLayer.Backward(m_colPowerTopVec, rgbPropagateDown, m_colPoolTopVec);
                m_poolLayer.Backward(m_colPoolTopVec, rgbPropagateDown, m_colSquareTopVec);
                m_squareLayer.Backward(m_colSquareTopVec, rgbPropagateDown, m_colSquareBottomVec);
                m_splitLayer.Backward(m_colSplitTopVec, rgbPropagateDown, colBottom);
            }
        }
    }
}
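
Example usage (a minimal sketch, not taken from the MyCaffe samples): the snippet assumes an existing CudaDnn<float> connection 'cuda', a Log 'log', and one-blob BlobCollection<float> instances 'colBottom' and 'colTop'; the AlexNet-style parameter values are only illustrative.

    LayerParameter p = new LayerParameter(LayerParameter.LayerType.LRN, "lrn1");
    p.lrn_param.local_size = 5;   // must be odd
    p.lrn_param.alpha = 0.0001;
    p.lrn_param.beta = 0.75;
    p.lrn_param.norm_region = LRNParameter.NormRegion.ACROSS_CHANNELS;

    LRNLayer<float> lrn = new LRNLayer<float>(cuda, log, p);
    lrn.Setup(colBottom, colTop);    // runs LayerSetUp and Reshape
    lrn.Forward(colBottom, colTop);  // normalizes colBottom[0] into colTop[0]
    lrn.Dispose();                   // releases GPU and host resources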
The Log class provides general output in text form.
Definition: Log.cs:13
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
Definition: Log.cs:394
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
Definition: Log.cs:239
The Utility class provides general utility functions.
Definition: Utility.cs:35
static List< int > Create(int nCount, int nStart, int nInc)
Create a new List and fill it with values starting with start and incrementing by inc.
Definition: Utility.cs:721
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
Definition: Blob.cs:25
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
Definition: Blob.cs:1487
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use
Definition: Blob.cs:442
string Name
Get/set the name of the Blob.
Definition: Blob.cs:2184
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
Definition: Blob.cs:402
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
Definition: Blob.cs:1479
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
The EltwiseLayer computes elementwise operations, such as product and sum, along multiple input blobs...
Definition: EltwiseLayer.cs:23
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
The "Local Response Normalization" LRNLayer is used to normalize the input in a local region across o...
Definition: LRNLayer.cs:21
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input
Definition: LRNLayer.cs:211
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation using the Engine.CUDNN mode.
Definition: LRNLayer.cs:471
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation using the Engine.CAFFE mode.
Definition: LRNLayer.cs:413
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs using the Engine.CAFFE mode.
Definition: LRNLayer.cs:443
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
Definition: LRNLayer.cs:306
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
Definition: LRNLayer.cs:182
override void dispose()
Releases all GPU and host resources used by the Layer.
Definition: LRNLayer.cs:103
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation using either the Engine.CUDNN or Engine.CAFFE mode depending on the engine parame...
Definition: LRNLayer.cs:374
LRNLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The LRNLayer constructor.
Definition: LRNLayer.cs:84
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs using either the Engine.CUDNN or Engine....
Definition: LRNLayer.cs:394
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: lrn
Definition: LRNLayer.cs:219
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer for both Engine.CUDNN and Engine.CAFFE modes.
Definition: LRNLayer.cs:228
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs using the Engine.CUDNN mode.
Definition: LRNLayer.cs:492
An interface for the units of computation which can be composed into a Net.
Definition: Layer.cs:31
Log m_log
Specifies the Log for output.
Definition: Layer.cs:43
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
Definition: Layer.cs:535
T m_tZero
Specifies a generic type equal to 0.0.
Definition: Layer.cs:76
void Backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Given the top Blob error gradients, compute the bottom Blob error gradients.
Definition: Layer.cs:815
T m_tOne
Specifies a generic type equal to 1.0.
Definition: Layer.cs:72
double Forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Given the bottom (input) Blobs, this function computes the top (output) Blobs and the loss.
Definition: Layer.cs:728
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
Definition: Layer.cs:84
void Dispose()
Releases all GPU and host resources used by the Layer.
Definition: Layer.cs:180
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
void Setup(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Implements common Layer setup functionality.
Definition: Layer.cs:439
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the top (output) Blob to have the same shape as the bottom (input) Blob.
Definition: NeuronLayer.cs:64
The PoolingLayer pools the input image by taking the max, average, etc. within regions....
Definition: PoolingLayer.cs:23
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
The PowerLayer computes the power of the input. This layer is initialized with the MyCaffe....
Definition: PowerLayer.cs:24
The SplitLayer creates a 'split' path in the network by copying the bottom blob into multiple top blo...
Definition: SplitLayer.cs:17
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
Definition: SplitLayer.cs:69
Specifies the parameters for the EltwiseLayer.
EltwiseOp
Defines the operation to perform.
EltwiseOp operation
Specifies the element-wise operation.
List< uint > kernel_size
Kernel size is given as a single value for equal dimensions in all spatial dimensions,...
List< uint > pad
Pad is given as a single value for equal dimensions in all spatial dimensions, or once per spatial di...
Specifies the parameter for the LRNLayer.
Definition: LRNParameter.cs:20
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
Definition: LRNParameter.cs:67
NormRegion
Defines the normalization region.
Definition: LRNParameter.cs:31
double beta
Specifies the beta value used as the power parameter in the normalization formula....
NormRegion norm_region
Specifies the region over which to normalize.
uint local_size
Specifies the local size of the normalization window width.
Definition: LRNParameter.cs:81
double alpha
Specifies the alpha value used for variance scaling in the normalization formula. NOTE: cuDNN uses a ...
Definition: LRNParameter.cs:91
double k
Specifies the k value used by the normalization parameter. NOTE: cuDNN uses a default of k = 2....
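For reference, the across-channel normalization these parameters configure follows the standard Caffe formulation (a sketch, consistent with the kernel arguments used in CrossChannelForward above): y_i = x_i / (k + (alpha / local_size) * sum of x_j^2 over the local_size channels centered on channel i)^beta.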
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LRNParameter lrn_param
Returns the parameter set when initialized with LayerType.LRN
PoolingParameter pooling_param
Returns the parameter set when initialized with LayerType.POOLING
bool use_halfsize
Specifies whether or not to use half sized memory or not.
EltwiseParameter eltwise_param
Returns the parameter set when initialized with LayerType.ELTWISE
PowerParameter power_param
Returns the parameter set when initialized with LayerType.POWER
LayerType
Specifies the layer type.
Specifies the parameters for the PoolingLayer.
PoolingMethod
Defines the pooling method.
PoolingMethod pool
Specifies the pooling method.
double power
Specifies the power value in the formula (shift + scale * x)^power.
double scale
Specifies the scale value in the formula (shift + scale * x)^power.
double shift
Specifies the shift value in the formula (shift + scale * x)^power.
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...
Definition: Annotation.cs:12