MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
PoolingLayer.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers
{
    /// <summary>
    /// The PoolingLayer pools the input image by taking the max, average, etc. within regions.
    /// </summary>
    /// <typeparam name="T">Specifies the base type: float or double.</typeparam>
    public class PoolingLayer<T> : Layer<T>
    {
        int m_nKernelH;
        int m_nKernelW;
        int m_nStrideH;
        int m_nStrideW;
        int m_nPadH;
        int m_nPadW;
        int m_nChannels;
        int m_nHeight;
        int m_nWidth;
        int m_nPooledHeight;
        int m_nPooledWidth;
        bool m_bGlobalPooling;
        Blob<T> m_blobRandIdx;
        Blob<T> m_blobMaxIdx;

        long m_hCudnn = 0;
        long m_hBottomDesc = 0;
        long m_hTopDesc = 0;
        long m_hPoolingDesc = 0;
        PoolingMethod m_method;

        /// <summary>
        /// The PoolingLayer constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter describing the Layer.</param>
        public PoolingLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.POOLING;
            m_blobRandIdx = new Blob<T>(cuda, log);
            m_blobRandIdx.Name = m_param.name + " randidx";
            m_blobMaxIdx = new Blob<T>(cuda, log);
            m_blobMaxIdx.Name = m_param.name + " maxidx";
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            if (m_hPoolingDesc != 0)
            {
                m_cuda.FreePoolingDesc(m_hPoolingDesc);
                m_hPoolingDesc = 0;
            }

            if (m_hTopDesc != 0)
            {
                m_cuda.FreeTensorDesc(m_hTopDesc);
                m_hTopDesc = 0;
            }

            if (m_hBottomDesc != 0)
            {
                m_cuda.FreeTensorDesc(m_hBottomDesc);
                m_hBottomDesc = 0;
            }

            if (m_hCudnn != 0)
            {
                m_cuda.FreeCuDNN(m_hCudnn);
                m_hCudnn = 0;
            }

            m_blobRandIdx.Dispose();
            m_blobMaxIdx.Dispose();
            base.dispose();
        }

        /// <summary>
        /// Derivative layers should add all internal blobs to the 'col' provided.
        /// </summary>
        /// <param name="col">Specifies the collection of internal blobs.</param>
        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;

            // The index blobs are only used by the Engine.CAFFE code paths.
            if (!m_param.pooling_param.useCudnn())
            {
                col.Add(m_blobRandIdx);
                col.Add(m_blobMaxIdx);
            }
        }

        /// <summary>
        /// Returns the required number of bottom (input) Blobs: input.
        /// </summary>
        public override int ExactNumBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the required number of top (output) Blobs: pool, mask (Engine.CAFFE only).
        /// </summary>
        public override int ExactNumTopBlobs
        {
            get { return (m_param.pooling_param.engine == EngineParameter.Engine.CAFFE) ? -1 : 1; }
        }

        /// <summary>
        /// Returns the minimum number of top Blobs. Currently, Engine.CUDNN does not support the extra top blob.
        /// </summary>
        public override int MinTopBlobs
        {
            get { return (m_param.pooling_param.engine == EngineParameter.Engine.CAFFE) ? 1 : -1; }
        }

        /// <summary>
        /// Returns the maximum number of top Blobs. MAX pool layers can output an extra top blob
        /// for the mask; others can only output the pooled inputs.
        /// </summary>
        public override int MaxTopBlobs
        {
            get { return (m_param.pooling_param.pool == PoolingParameter.PoolingMethod.MAX) ? 2 : 1; }
        }

        /// <summary>
        /// Setup the layer for use with both Engine.CAFFE and Engine.CUDNN modes.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            PoolingParameter p = m_param.pooling_param;

            if (p.global_pooling)
            {
                if (!(p.kernel_size.Count > 0 || p.kernel_h.HasValue || p.kernel_w.HasValue))
                    m_log.WriteLine("WARNING: With global pooling = true, the filter size cannot be specified; the bottom hxw = '" + colBottom[0].height.ToString() + "x" + colBottom[0].width.ToString() + "' will be used instead for the kernel size.");
            }
            else
            {
                m_log.CHECK(!(p.kernel_size.Count > 0) != !(p.kernel_h.HasValue && p.kernel_w.HasValue), "Filter size is kernel_size OR kernel_h and kernel_w; not both.");
                m_log.CHECK(p.kernel_size.Count > 0 || (p.kernel_h.HasValue && p.kernel_w.HasValue), "For non-square filters, both kernel_h and kernel_w are required.");
            }

            m_log.CHECK(((p.pad.Count == 0) && p.pad_h.HasValue && p.pad_w.HasValue) || (!p.pad_h.HasValue && !p.pad_w.HasValue), "Pad is pad OR pad_h and pad_w are required.");
            m_log.CHECK(((p.stride.Count == 0) && p.stride_h.HasValue && p.stride_w.HasValue) || (!p.stride_h.HasValue && !p.stride_w.HasValue), "Stride is stride OR stride_h and stride_w are required.");
            m_bGlobalPooling = p.global_pooling;


            //---- Kernel Size ----

            if (m_bGlobalPooling)
            {
                m_nKernelH = colBottom[0].height;
                m_nKernelW = colBottom[0].width;
            }
            else
            {
                if (p.kernel_size.Count > 0)
                {
                    m_nKernelH = (int)p.kernel_size[0];
                    m_nKernelW = (int)p.kernel_size[0];
                }
                else
                {
                    m_nKernelH = (int)p.kernel_h.Value;
                    m_nKernelW = (int)p.kernel_w.Value;
                }
            }

            m_log.CHECK_GT(m_nKernelH, 0, "Filter dimensions cannot be zero.");
            m_log.CHECK_GT(m_nKernelW, 0, "Filter dimensions cannot be zero.");


            //---- Pad ----

            if (p.pad.Count > 0)
            {
                m_nPadH = (int)p.pad[0];
                m_nPadW = (int)p.pad[0];
            }
            else
            {
                m_nPadH = (p.pad_h.HasValue) ? (int)p.pad_h.Value : 0;
                m_nPadW = (p.pad_w.HasValue) ? (int)p.pad_w.Value : 0;
            }


            //---- Stride ----

            if (p.stride.Count > 0)
            {
                m_nStrideH = (int)p.stride[0];
                m_nStrideW = (int)p.stride[0];
            }
            else
            {
                m_nStrideH = (p.stride_h.HasValue) ? (int)p.stride_h.Value : 1;
                m_nStrideW = (p.stride_w.HasValue) ? (int)p.stride_w.Value : 1;
            }
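
            // Example (illustrative): pooling_param { kernel_size: 3, stride: 2 } resolves to a
            // 3x3 kernel, 2x2 stride and 0x0 pad; specifying kernel_h: 3 and kernel_w: 2 instead
            // yields a non-square 3x2 kernel.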

            if (m_bGlobalPooling)
                m_log.CHECK(m_nPadH == 0 && m_nPadW == 0 && m_nStrideH == 1 && m_nStrideW == 1, "With global pooling = true, only pad = 0 and stride = 1 are allowed.");

            if (m_nPadH != 0 || m_nPadW != 0)
            {
                m_log.CHECK(m_param.pooling_param.pool == PoolingParameter.PoolingMethod.AVE ||
                            m_param.pooling_param.pool == PoolingParameter.PoolingMethod.MAX, "Padding implemented for AVE and MAX pooling only.");
                m_log.CHECK_LT(m_nPadH, m_nKernelH, "The pad_h must be < kernel_h.");
                m_log.CHECK_LT(m_nPadW, m_nKernelW, "The pad_w must be < kernel_w.");
            }

            if (!m_param.pooling_param.useCudnn())
                return;


            //---------------------------------------------
            // cuDnn specific pooling.
            //
            // Note only MAX and AVE pooling are supported.
            //---------------------------------------------

            // Setup the convert to half flags used by the Layer just before calling forward and backward.
            m_bUseHalfSize = m_param.use_halfsize;

            if (p.pool == PoolingParameter.PoolingMethod.MAX)
                m_method = PoolingMethod.MAX;
            else
                m_method = PoolingMethod.AVE;

            m_hCudnn = m_cuda.CreateCuDNN();
            m_hBottomDesc = m_cuda.CreateTensorDesc();
            m_hTopDesc = m_cuda.CreateTensorDesc();
            m_hPoolingDesc = m_cuda.CreatePoolingDesc();
            m_cuda.SetPoolingDesc(m_hPoolingDesc, m_method, m_nKernelH, m_nKernelW, m_nPadH, m_nPadW, m_nStrideH, m_nStrideW);
        }

        /// <summary>
        /// Reshape the bottom (input) and top (output) blobs.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_log.CHECK_EQ(4, colBottom[0].num_axes, "Input must have 4 axes, corresponding to (num, channels, height, width).");

            m_nChannels = colBottom[0].channels;
            m_nHeight = colBottom[0].height;
            m_nWidth = colBottom[0].width;

            if (m_bGlobalPooling)
            {
                m_nKernelH = colBottom[0].height;
                m_nKernelW = colBottom[0].width;
            }

            if (m_param.pooling_param.reshape_algorithm == PoolingParameter.PoolingReshapeAlgorithm.ONNX)
            {
                m_nPooledHeight = (int)Math.Floor((m_nHeight + 2 * m_nPadH - m_nKernelH) / (double)m_nStrideH) + 1;
                m_nPooledWidth = (int)Math.Floor((m_nWidth + 2 * m_nPadW - m_nKernelW) / (double)m_nStrideW) + 1;
            }
            else // use the original CAFFE method.
            {
                m_nPooledHeight = (int)Math.Ceiling((m_nHeight + 2 * m_nPadH - m_nKernelH) / (double)m_nStrideH) + 1;
                m_nPooledWidth = (int)Math.Ceiling((m_nWidth + 2 * m_nPadW - m_nKernelW) / (double)m_nStrideW) + 1;
            }
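
            // Worked example: H = 224, kernel 3, stride 2, pad 0:
            //   CAFFE (ceil):  pooled = ceil((224 - 3) / 2) + 1 = ceil(110.5) + 1 = 112
            //   ONNX  (floor): pooled = floor((224 - 3) / 2) + 1 = floor(110.5) + 1 = 111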

            if (m_nPooledHeight <= 0)
            {
                m_nPooledHeight = 1;
                m_log.WriteLine("WARNING: pooling height was <= 0 in layer '" + m_param.name + "', setting to 1.");
            }

            if (m_nPooledWidth <= 0)
            {
                m_nPooledWidth = 1;
                m_log.WriteLine("WARNING: pooling width was <= 0 in layer '" + m_param.name + "', setting to 1.");
            }

            if (m_nPadH > 0 || m_nPadW > 0)
            {
                // If we have padding, ensure that the last pooling starts strictly
                // inside the image (instead of at the padding); otherwise clip the last.
                if ((m_nPooledHeight - 1) * m_nStrideH >= m_nHeight + m_nPadH)
                    m_nPooledHeight--;

                if ((m_nPooledWidth - 1) * m_nStrideW >= m_nWidth + m_nPadW)
                    m_nPooledWidth--;

                m_log.CHECK_LT((m_nPooledHeight - 1) * m_nStrideH, m_nHeight + m_nPadH, "The pooled height must fit in the image and not overlap onto the padding.");
                m_log.CHECK_LT((m_nPooledWidth - 1) * m_nStrideW, m_nWidth + m_nPadW, "The pooled width must fit in the image and not overlap onto the padding.");
            }
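
            // Clipping example: H = 3, kernel 2, stride 2, pad 1 gives
            // pooled = ceil((3 + 2 - 2) / 2) + 1 = 3, but (3 - 1) * 2 = 4 >= 3 + 1, so the
            // last window would start in the padding and pooled is clipped down to 2.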

            colTop[0].Reshape(colBottom[0].num, m_nChannels, m_nPooledHeight, m_nPooledWidth, m_bUseHalfSize);

            if (colTop.Count > 1)
                colTop[1].ReshapeLike(colTop[0], m_bUseHalfSize);

            if (!m_param.pooling_param.useCudnn())
            {
                // If max pooling, we will initialize the vector index part.
                if (m_param.pooling_param.pool == PoolingParameter.PoolingMethod.MAX)
                    m_blobMaxIdx.Reshape(colBottom[0].num, m_nChannels, m_nPooledHeight, m_nPooledWidth);

                // If stochastic pooling, we will initialize the random index part.
                if (m_param.pooling_param.pool == PoolingParameter.PoolingMethod.STOCHASTIC)
                    m_blobRandIdx.Reshape(colBottom[0].num, m_nChannels, m_nPooledHeight, m_nPooledWidth);

                return;
            }

            //---------------------------------------------
            // cuDnn specific pooling.
            //---------------------------------------------

            m_cuda.SetTensorDesc(m_hBottomDesc, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_bUseHalfSize);
            m_cuda.SetTensorDesc(m_hTopDesc, colBottom[0].num, m_nChannels, m_nPooledHeight, m_nPooledWidth, m_bUseHalfSize);
        }

        /// <summary>
        /// Run the Forward computation using either the Engine.CAFFE or Engine.CUDNN mode
        /// as specified in the LayerParameter.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            if (!m_param.pooling_param.useCudnn())
                forward_cuda(colBottom, colTop);
            else
                forward_cudnn(colBottom, colTop);
        }

        /// <summary>
        /// Run the Backward computation using either the Engine.CAFFE or Engine.CUDNN mode
        /// as specified in the LayerParameter.
        /// </summary>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        /// <param name="rgbPropagateDown">Specifies whether or not to propagate down each bottom.</param>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (!m_param.pooling_param.useCudnn())
                backward_cuda(colTop, rgbPropagateDown, colBottom);
            else
                backward_cudnn(colTop, rgbPropagateDown, colBottom);
        }

        /// <summary>
        /// Run the Forward computation using the Engine.CAFFE mode as specified in the LayerParameter.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        protected void forward_cuda(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].mutable_gpu_data;
            int nCount = colTop[0].count();

            // We'll output the mask to top[1] if it is of size > 1.
            bool bUseTopMask = (colTop.Count > 1);
            long hMask = 0;
            long hTopMask = 0;

            switch (m_param.pooling_param.pool)
            {
                case PoolingParameter.PoolingMethod.MAX:
                    if (bUseTopMask)
                        hTopMask = colTop[1].mutable_gpu_data;
                    else
                        hMask = m_blobMaxIdx.mutable_gpu_data;
                    m_cuda.pooling_fwd(POOLING_METHOD.MAX, nCount, hBottomData, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hTopData, hMask, hTopMask);
                    break;

                case PoolingParameter.PoolingMethod.AVE:
                    m_cuda.pooling_fwd(POOLING_METHOD.AVE, nCount, hBottomData, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hTopData, 0, 0);
                    break;

                case PoolingParameter.PoolingMethod.STOCHASTIC:
                    m_cuda.rng_uniform(nCount, m_tZero, m_tOne, m_blobRandIdx.mutable_gpu_data);
                    if (m_phase == Phase.TRAIN)
                        m_cuda.pooling_fwd(POOLING_METHOD.STO_TRAIN, nCount, hBottomData, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hTopData, m_blobRandIdx.gpu_data, 0);
                    else
                        m_cuda.pooling_fwd(POOLING_METHOD.STO_TEST, nCount, hBottomData, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hTopData, m_blobRandIdx.gpu_data, 0);
                    break;

                default:
                    m_log.FAIL("Unknown pooling method!");
                    break;
            }
        }
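
        // Mask semantics (illustrative): for a 1x1x2x2 bottom [[1, 3], [2, 0]] pooled with a
        // single 2x2 MAX window, the top holds [3] and the mask (m_blobMaxIdx or top[1])
        // holds the flat index 1 of the winning element; backward_cuda uses this index to
        // route the top gradient back to that element only.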

        /// <summary>
        /// Run the Backward computation using the Engine.CAFFE mode as specified in the LayerParameter.
        /// </summary>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        /// <param name="rgbPropagateDown">Specifies whether or not to propagate down each bottom.</param>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        protected void backward_cuda(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (!rgbPropagateDown[0])
                return;

            long hTopDiff = colTop[0].gpu_diff;
            long hBottomDiff = colBottom[0].mutable_gpu_diff;
            int nCount = colBottom[0].count();

            // The mask was output to top[1] if it is of size > 1.
            bool bUseTopMask = (colTop.Count > 1);
            long hMask = 0;
            long hTopMask = 0;

            switch (m_param.pooling_param.pool)
            {
                case PoolingParameter.PoolingMethod.MAX:
                    if (bUseTopMask)
                        hTopMask = colTop[1].gpu_data;
                    else
                        hMask = m_blobMaxIdx.gpu_data;

                    m_cuda.pooling_bwd(POOLING_METHOD.MAX, nCount, hTopDiff, colTop[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hBottomDiff, hMask, hTopMask);
                    break;

                case PoolingParameter.PoolingMethod.AVE:
                    m_cuda.pooling_bwd(POOLING_METHOD.AVE, nCount, hTopDiff, colTop[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hBottomDiff, 0, 0);
                    break;

                case PoolingParameter.PoolingMethod.STOCHASTIC:
                    m_cuda.pooling_bwd(POOLING_METHOD.STO_TRAIN, nCount, hTopDiff, colTop[0].num, m_nChannels, m_nHeight, m_nWidth, m_nPooledHeight, m_nPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hBottomDiff, m_blobRandIdx.gpu_data, 0);
                    break;

                default:
                    m_log.FAIL("Unknown pooling method!");
                    break;
            }
        }

        /// <summary>
        /// Run the Forward computation using the Engine.CUDNN mode as specified in the LayerParameter.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        protected void forward_cudnn(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].mutable_gpu_data;

            m_cuda.PoolingForward(m_hCudnn, m_hPoolingDesc, m_tOne, m_hBottomDesc, hBottomData, m_tZero, m_hTopDesc, hTopData);
        }

        /// <summary>
        /// Run the Backward computation using the Engine.CUDNN mode as specified in the LayerParameter.
        /// </summary>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        /// <param name="rgbPropagateDown">Specifies whether or not to propagate down each bottom.</param>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        protected void backward_cudnn(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            long hTopDiff = colTop[0].gpu_diff;
            long hTopData = colTop[0].gpu_data;
            long hBottomData = colBottom[0].gpu_data;
            long hBottomDiff = colBottom[0].mutable_gpu_diff;

            m_cuda.PoolingBackward(m_hCudnn, m_hPoolingDesc, m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData, m_tZero, m_hBottomDesc, hBottomDiff);
        }
    }
}
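
The pooled-size arithmetic in Reshape can be checked outside of MyCaffe. Below is a minimal standalone sketch of the CAFFE (ceil) reshape path, including the padding clip; the PoolingShapeExample class and PooledDim helper are hypothetical names introduced here for illustration and are not MyCaffe APIs.

using System;

// Hypothetical helper (not part of MyCaffe): mirrors the CAFFE-style (ceil)
// pooled-dimension computation from Reshape above, including the clip that
// keeps the last pooling window from starting inside the padding.
static class PoolingShapeExample
{
    static int PooledDim(int nSize, int nKernel, int nStride, int nPad)
    {
        int nPooled = (int)Math.Ceiling((nSize + 2 * nPad - nKernel) / (double)nStride) + 1;

        if (nPooled <= 0)
            nPooled = 1;

        // Clip the last window if it would start in the padding.
        if (nPad > 0 && (nPooled - 1) * nStride >= nSize + nPad)
            nPooled--;

        return nPooled;
    }

    static void Main()
    {
        Console.WriteLine(PooledDim(224, 3, 2, 0)); // 112, matching the worked example above.
        Console.WriteLine(PooledDim(3, 2, 2, 1));   // 3 clipped to 2, matching the clipping example.
    }
}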