MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
DecodeLayer.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Diagnostics;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers.beta
{
    /// <summary>
    /// The DecodeLayer decodes the label of a classification for an encoding produced by a Siamese Network
    /// or similar encoding network, by returning the distance from the encoding to each label's target
    /// (a centroid, a KNN cache of encodings, or a pre-generated target); the label with the smallest
    /// distance is the decoded classification.
    /// </summary>
    /// <typeparam name="T">Specifies the base type of 'float' or 'double'.</typeparam>
    public class DecodeLayer<T> : Layer<T>
    {
        List<int> m_rgIgnoreLabels = new List<int>();
        int m_nCentroidOutputIteration = 300;
        int m_nCacheSize = 100;
        int m_nNum = 0;
        int m_nEncodingDim = 0;
        Blob<T> m_blobData;
        Blob<T> m_blobDistSq;
        Blob<T> m_blobSummerVec;
        Blob<T> m_blobWork;
        int m_nLabelCount = 0;
        int m_nIteration = 0;
        long m_hMin = 0;                // host buffer receiving the 'k' minimum distances.
        long m_hMax = 0;                // host buffer receiving the 'k' maximum distances.
        double m_dfPreGenAlpha = 0;
        bool m_bInitializePreGenTargets = true;

        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter describing the Layer.</param>
        public DecodeLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.DECODE;

            m_blobDistSq = new Blob<T>(cuda, log, false);
            m_blobDistSq.Name = m_param.name + " distsq";
            m_blobSummerVec = new Blob<T>(cuda, log, false);
            m_blobSummerVec.Name = m_param.name + " sum";
            m_blobData = new Blob<T>(cuda, log);
            m_blobData.Name = m_param.name + " data";

            m_hMin = cuda.AllocHostBuffer(m_param.decode_param.k);
            m_hMax = cuda.AllocHostBuffer(m_param.decode_param.k);
            m_blobWork = new Blob<T>(cuda, log);
            m_blobWork.Name = "work";
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            dispose(ref m_blobDistSq);
            dispose(ref m_blobSummerVec);
            dispose(ref m_blobData);
            dispose(ref m_blobWork);

            // Free the host buffers allocated in the constructor.
            if (m_hMin != 0)
                m_cuda.FreeHostBuffer(m_hMin);
            if (m_hMax != 0)
                m_cuda.FreeHostBuffer(m_hMax);

            base.dispose();
        }

        /// <summary>
        /// Derivative layers should add all internal blobs to the 'col' provided.
        /// </summary>
        /// <param name="col">Specifies the collection of internal blobs to fill.</param>
        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;

            col.Add(m_blobDistSq);
            col.Add(m_blobSummerVec);
            col.Add(m_blobData);
        }

        /// <summary>
        /// Returns the minimum number of bottom blobs used: predicted (RUN phase).
        /// </summary>
        public override int MinBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the maximum number of bottom blobs used: predicted, label (TRAIN and TEST phases).
        /// </summary>
        public override int MaxBottomBlobs
        {
            get { return 2; }
        }

        /// <summary>
        /// Returns the minimum number of top blobs: distances.
        /// </summary>
        public override int MinTopBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the maximum number of top blobs: distances, centroids.
        /// </summary>
        public override int MaxTopBlobs
        {
            get { return 2; }
        }

        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_rgIgnoreLabels = m_param.decode_param.ignore_labels;
            m_nEncodingDim = colBottom[0].count(1);

            if (m_param.decode_param.target != param.beta.DecodeParameter.TARGET.PREGEN)
            {
                if (m_param.decode_param.enable_centroid_update)
                {
                    m_nCentroidOutputIteration = m_param.decode_param.centroid_output_iteration;
                    if (m_nCentroidOutputIteration < 10)
                        m_log.WriteLine("WARNING: The centroid output iteration is set at " + m_nCentroidOutputIteration.ToString() + "; a value of at least 10 is recommended.");
                }
            }
            else
            {
                m_nCentroidOutputIteration = 0;
            }

            m_nCacheSize = m_param.decode_param.cache_size;
            m_log.CHECK_GT(m_nCacheSize, 0, "The cache size must be > 0.");

            m_dfPreGenAlpha = m_param.decode_param.pregen_alpha;

            if (m_colBlobs.Count == 0)
            {
                Blob<T> blobCentroids = new Blob<T>(m_cuda, m_log, false);
                blobCentroids.Name = m_param.name + " centroids";
                blobCentroids.reshape_when_sharing = true;

                List<int> rgCentroidShape = new List<int>() { 0 }; // skip size check.
                if (!shareParameter(blobCentroids, rgCentroidShape))
                {
                    blobCentroids.Reshape(2, m_nEncodingDim, 1, 1); // set to at least two labels initially (may get expanded in forward).
                    blobCentroids.SetData(0);
                }

                m_colBlobs.Add(blobCentroids);

                Blob<T> blobStatus = new Blob<T>(m_cuda, m_log, false);
                blobStatus.Name = m_param.name + " status";
                blobStatus.reshape_when_sharing = true;

                List<int> rgStatusShape = new List<int>() { 0 }; // skip size check.
                if (!shareParameter(blobStatus, rgStatusShape))
                {
                    blobStatus.Reshape(1, 1, 1, 1); // This will be resized to the label count.
                    blobStatus.SetData(0);
                }

                m_colBlobs.Add(blobStatus);

                Blob<T> blobEncodingCounts = new Blob<T>(m_cuda, m_log, false);
                blobEncodingCounts.Name = m_param.name + " enc_counts";
                blobEncodingCounts.reshape_when_sharing = true;

                List<int> rgEncCountShape = new List<int>() { 0 }; // skip size check.
                if (!shareParameter(blobEncodingCounts, rgEncCountShape))
                {
                    blobEncodingCounts.Reshape(1, 1, 1, 1); // This will be resized to the label count.
                    blobEncodingCounts.SetData(0);
                }

                m_colBlobs.Add(blobEncodingCounts);

                if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.KNN)
                {
                    Blob<T> blobEncodings = new Blob<T>(m_cuda, m_log, false);
                    blobEncodings.Name = m_param.name + " encodings";
                    blobEncodings.reshape_when_sharing = true;

                    List<int> rgEncShape = new List<int>() { 0 }; // skip size check.
                    if (!shareParameter(blobEncodings, rgEncShape))
                    {
                        blobEncodings.Reshape(1, 1, m_nEncodingDim, 1); // This will be resized to the label count x nMaxItems x nEncDim.
                        blobEncodings.SetData(0);
                    }

                    m_colBlobs.Add(blobEncodings);
                }
            }

            m_nIteration = 0;
        }
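
        // Learnable blob layout used throughout this layer (a summary of the setup above;
        // the KNN cache blob only exists when the target is KNN):
        //   m_colBlobs[0] 'centroids'  - one target encoding per label (labels x encoding dim).
        //   m_colBlobs[1] 'status'     - one flag per label, set to 1 once the label's target is ready.
        //   m_colBlobs[2] 'enc_counts' - the number of encodings observed per label.
        //   m_colBlobs[3] 'encodings'  - the KNN cache (labels x cache items x encoding dim).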

        /// <summary>
        /// Reshape the bottom (input) and top (output) blobs.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            int nNum = colBottom[0].num;
            bool bFirstReshape = (nNum != m_nNum);
            m_nNum = nNum;

            m_log.CHECK_EQ(m_nEncodingDim, colBottom[0].count(1), "The encoding dim changed!");

            if (colBottom.Count > 1)
                m_log.CHECK_EQ(colBottom[1].num, m_nNum, "The number of labels does not match the number of items at bottom[0].");
        }

        /// <summary>
        /// Creates the pre-distanced, pre-generated targets; made public only for testing.
        /// </summary>
        /// <param name="b">Specifies the blob holding the pre-generated targets.</param>
        /// <param name="dfMinDist">Specifies the minimum squared distance allowed between any two targets.</param>
        public void createPreGenTargets(Blob<T> b, double dfMinDist)
        {
            Random rand = new Random();
            bool bDone = false;
            int nNum = b.num;
            int nDim = b.count(1);
            float[] rgData = convertF(b.mutable_cpu_data);

            while (!bDone)
            {
                List<List<double>> rgDist = new List<List<double>>();
                double dfAbsMinDist = double.MaxValue;

                for (int i = 0; i < nNum; i++)
                {
                    rgDist.Add(new List<double>());

                    for (int j = 0; j < nNum; j++)
                    {
                        if (i != j)
                        {
                            double dfDiff = 0;
                            double dfDist = 0;

                            // Squared euclidean distance between targets i and j.
                            for (int k = 0; k < nDim; k++)
                            {
                                dfDiff = rgData[i * nDim + k] - rgData[j * nDim + k];
                                dfDist += (dfDiff * dfDiff);
                            }

                            rgDist[i].Add(dfDist);
                            dfAbsMinDist = Math.Min(dfAbsMinDist, dfDist);

                            // Nudge target j away when it falls within the minimum distance.
                            if (dfDist < dfMinDist)
                            {
                                for (int k = 0; k < nDim; k++)
                                {
                                    rgData[j * nDim + k] += (float)(rand.NextDouble() * (dfMinDist / 4));
                                }
                            }
                        }
                    }
                }

                if (dfAbsMinDist > dfMinDist)
                    bDone = true;
            }

            b.mutable_cpu_data = convert(rgData);
        }

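        // A worked sketch of what createPreGenTargets enforces (hypothetical values, not from the
        // source): with two targets x0 = (0.0, 0.0) and x1 = (0.1, 0.0) and dfMinDist = 1.0, the
        // squared distance d(0,1) = (0.0 - 0.1)^2 + (0.0 - 0.0)^2 = 0.01 falls below dfMinDist,
        // so x1 receives a random nudge in [0, dfMinDist/4) per dimension and the pass repeats
        // until every pairwise squared distance exceeds dfMinDist.
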
        /// <summary>
        /// Forward computation.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs: the encodings, plus the labels when training.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs: the distances per label, plus optionally the centroids.</param>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            int nItemNum = colBottom[0].num;
            int nItemCount = nItemNum * m_nCacheSize;
            int nLabelDim = 0;
            double dfAlpha = 1.0 / (double)nItemCount;
            double[] rgBottomLabel = null;

            if (m_param.phase == Phase.TRAIN)
            {
                nLabelDim = colBottom[1].count(1);
                m_log.CHECK(colBottom[1].count() % nLabelDim == 0, "The bottom[1] count must be a multiple of its label dimension: 2 for {lbl1, lbl2}, or 3 for {anc, pos, neg}.");

                rgBottomLabel = convertD(colBottom[1].update_cpu_data());

                // Use >= so that a new label equal to the current label count still expands the blobs below.
                int nMaxLabel = rgBottomLabel.Max(p => (int)p);
                if (nMaxLabel >= m_nLabelCount)
                {
                    int nNumLabels = nMaxLabel + 1;

                    if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.PREGEN)
                        nNumLabels = m_param.decode_param.pregen_label_count;

                    if (m_colBlobs[0].count() != nNumLabels * m_nEncodingDim)
                    {
                        m_colBlobs[0].Reshape(nNumLabels, m_nEncodingDim, 1, 1);
                        m_colBlobs[0].SetData(0);
                    }

                    if (m_colBlobs[1].count() != nNumLabels)
                    {
                        m_colBlobs[1].Reshape(nNumLabels, 1, 1, 1); // status
                        m_colBlobs[1].SetData(0);
                    }

                    if (m_colBlobs[2].count() != nNumLabels)
                    {
                        m_colBlobs[2].Reshape(nNumLabels, 1, 1, 1); // label counts
                        m_colBlobs[2].SetData(0);
                    }

                    if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.KNN)
                    {
                        if (m_colBlobs[3].count() != nNumLabels * nItemCount * m_nEncodingDim)
                        {
                            m_colBlobs[3].Reshape(nNumLabels, nItemCount, m_nEncodingDim, 1);
                            m_colBlobs[3].SetData(0);
                        }
                    }

                    m_nLabelCount = nNumLabels;
                }

                if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.PREGEN)
                {
                    if (m_bInitializePreGenTargets)
                    {
                        createPreGenTargets(m_colBlobs[0], m_dfPreGenAlpha * 2);
                        m_colBlobs[0].snapshot_requested = true;
                        m_bInitializePreGenTargets = false;
                    }
                }
            }

            int nActiveLabels = m_colBlobs[1].num - m_rgIgnoreLabels.Count;

            if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.KNN)
            {
                // KNN compares the input to every cached encoding: labels x cache items x encoding dim.
                m_blobData.ReshapeLike(m_colBlobs[3]);
                m_blobSummerVec.Reshape(m_blobData.channels, 1, 1, 1);
                m_blobSummerVec.SetData(1.0);
                m_blobDistSq.ReshapeLike(m_blobSummerVec);
            }
            else
            {
                // CENTROID and PREGEN compare the input to one target per label: labels x encoding dim.
                m_blobData.ReshapeLike(m_colBlobs[0]);
                m_blobSummerVec.Reshape(m_blobData.num, 1, 1, 1);
                m_blobSummerVec.SetData(1.0);
                m_blobDistSq.ReshapeLike(m_blobSummerVec);
            }

            if (nActiveLabels <= 0)
                nActiveLabels = m_colBlobs[0].num;

            colTop[0].Reshape(colBottom[0].num, m_colBlobs[0].num, 1, 1);

            for (int i = 0; i < colBottom[0].num; i++)
            {
                // When training, update the targets (centroids or KNN cache) from each labeled observation.
                if (rgBottomLabel != null)
                {
                    // Pre-gen targets are ready to go.
                    if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.PREGEN)
                        m_colBlobs[1].SetData(1.0);

                    int nLabel = (int)rgBottomLabel[i * nLabelDim]; // Only the first embedding and first label are used (second is ignored).
                    int nReady = (int)convertD(m_colBlobs[1].GetData(nLabel));
                    int nLabelItemCount = (int)convertD(m_colBlobs[2].GetData(nLabel));

                    // Build the centroid for the label as a running average of the encodings observed for it.
                    if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.CENTROID)
                    {
                        if (m_param.decode_param.enable_centroid_update)
                        {
                            if (nLabelItemCount == 0)
                            {
                                // Add the initial centroid portion for the label.
                                m_cuda.copy(m_nEncodingDim, colBottom[0].gpu_data, m_colBlobs[0].mutable_gpu_data, i * m_nEncodingDim, nLabel * m_nEncodingDim);
                                m_cuda.scale(m_nEncodingDim, convert(dfAlpha), m_colBlobs[0].gpu_data, m_colBlobs[0].mutable_gpu_data, nLabel * m_nEncodingDim, nLabel * m_nEncodingDim);
                            }
                            else if (nLabelItemCount < nItemCount)
                            {
                                // Add a portion of the current item to the centroid for the label:
                                // centroid = alpha * item + (1 - alpha) * centroid, with alpha = 1/(count + 1).
                                dfAlpha = 1.0 / (nLabelItemCount + 1);
                                m_cuda.add(m_nEncodingDim, colBottom[0].gpu_data, m_colBlobs[0].gpu_data, m_colBlobs[0].mutable_gpu_data, dfAlpha, 1.0 - dfAlpha, i * m_nEncodingDim, nLabel * m_nEncodingDim, nLabel * m_nEncodingDim);
                            }
                            else
                            {
                                // The cache is full; keep blending the current item into the centroid.
                                m_cuda.add(m_nEncodingDim, colBottom[0].gpu_data, m_colBlobs[0].gpu_data, m_colBlobs[0].mutable_gpu_data, dfAlpha, 1.0 - dfAlpha, i * m_nEncodingDim, nLabel * m_nEncodingDim, nLabel * m_nEncodingDim);

                                if (nReady == 0 && !m_rgIgnoreLabels.Contains(nLabel))
                                    m_colBlobs[1].SetData(1.0, nLabel);
                            }
                        }
                        else
                        {
                            m_colBlobs[1].SetData(1.0);
                        }
                    }
                    // Save all items observed to the KNN cache.
                    else if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.KNN)
                    {
                        // Items are added as a rolling list, ordered by label and then by arrival, as each encoding is received.
                        int nSrcOff = i * m_nEncodingDim;
                        int nDstOff = (nLabel * nItemCount * m_nEncodingDim) + ((nLabelItemCount % nItemCount) * m_nEncodingDim);
                        m_cuda.copy(m_nEncodingDim, colBottom[0].gpu_data, m_colBlobs[3].mutable_gpu_data, nSrcOff, nDstOff);
                    }

                    m_colBlobs[2].SetData(nLabelItemCount + 1, nLabel);
                }

                // Request a snapshot when completed to make sure to save the latest cache and centroids.
                int nCompletedTargets = (int)convertD(m_colBlobs[1].asum_data());
                if (nCompletedTargets == nActiveLabels)
                {
                    if (m_param.phase == Phase.TRAIN)
                        m_colBlobs[0].snapshot_requested = true;
                }

                m_log.CHECK_GE(m_blobData.num, m_colBlobs[0].num, "The data blob is not sized correctly!");

                // Load the current encoding into each label 'slot' of blobData so it can be compared to every target at once.
                int nCount = m_blobData.count();
                int nItems = m_blobData.num;

                if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.KNN)
                    nItems *= m_blobData.channels;

                m_cuda.fill(nItems, m_nEncodingDim, colBottom[0].gpu_data, i * m_nEncodingDim, nCount, m_blobData.mutable_gpu_data);

                // When using KNN, find the nearest neighbors from within the cached items.
                if (m_param.decode_param.target == param.beta.DecodeParameter.TARGET.KNN)
                {
                    if (nCompletedTargets == nActiveLabels)
                    {
                        m_blobDistSq.ReshapeLike(m_blobSummerVec);

                        m_cuda.sub(nCount,
                                   m_blobData.gpu_data,          // a
                                   m_colBlobs[3].gpu_data,       // b (saved encodings per label)
                                   m_blobData.mutable_gpu_diff); // a_i - b_i

                        m_cuda.powx(nCount,
                                    m_blobData.gpu_diff,          // a_i - b_i
                                    2.0,
                                    m_blobData.mutable_gpu_diff); // (a_i - b_i)^2

                        // Calculate the distances to the cached items of each label.
                        int nDim = m_blobData.count(1);
                        float[] rgMinDist = new float[m_blobData.num];
                        for (int j = 0; j < m_blobData.num; j++)
                        {
                            m_cuda.gemv(false,
                                        m_blobData.channels,           // item count.
                                        m_blobData.height,             // encoding size.
                                        m_tOne,
                                        m_blobData.gpu_diff,           // (a_i - b_i)^2
                                        m_blobSummerVec.gpu_data,
                                        m_tZero,
                                        m_blobDistSq.mutable_gpu_data, // \Sum (a_i - b_i)^2
                                        j * nDim,
                                        0,
                                        0);

                            // The first minmax call (with zero handles) queries the required work buffer size;
                            // the second computes the k minimum (and maximum) distances into the host buffers.
                            m_cuda.minmax(m_blobDistSq.count(), 0, 0, 0, m_param.decode_param.k, m_hMin, m_hMax, true);
                            double[] rgMinD = m_cuda.GetHostMemoryDouble(m_hMin);
                            m_blobWork.Reshape((int)rgMinD[0], 1, 1, 1);
                            m_cuda.minmax(m_blobDistSq.count(), m_blobDistSq.gpu_data, m_blobWork.gpu_data, m_blobWork.gpu_diff, m_param.decode_param.k, m_hMin, m_hMax, true);

                            // Average the k nearest (smallest) squared distances for label j.
                            float[] rgMin = m_cuda.GetHostMemoryFloat(m_hMin);
                            List<float> rgMin1 = rgMin.Where(p => p < float.MaxValue).Take(m_param.decode_param.k).ToList();

                            rgMinDist[j] = rgMin1.Average();
                        }

                        m_blobDistSq.Reshape(rgMinDist.Length, 1, 1, 1);
                        m_blobDistSq.mutable_cpu_data = convert(rgMinDist);
                    }
                    else
                    {
                        m_blobDistSq.Reshape(m_blobData.num, 1, 1, 1);
                        m_blobDistSq.SetData(0);

                        if (i == 0 && m_param.phase != Phase.TRAIN)
                            m_log.WriteLine("WARNING: The KNN cache is still filling...");
                    }
                }
                // Otherwise, when using CENTROID or PREGEN, calculate the distance using the latest centroids.
                else
                {
                    m_cuda.sub(nCount,
                               m_blobData.gpu_data,          // a
                               m_colBlobs[0].gpu_data,       // b (centroid)
                               m_blobData.mutable_gpu_diff); // a_i - b_i

                    m_cuda.powx(nCount,
                                m_blobData.gpu_diff,          // a_i - b_i
                                2.0,
                                m_blobData.mutable_gpu_diff); // (a_i - b_i)^2

                    // Use the generic m_tOne/m_tZero constants for the T-typed gemv, matching the KNN branch above.
                    m_cuda.gemv(false,
                                m_blobData.num,                 // label count.
                                m_blobData.channels,            // encoding size.
                                m_tOne,
                                m_blobData.gpu_diff,            // (a_i - b_i)^2
                                m_blobSummerVec.gpu_data,
                                m_tZero,
                                m_blobDistSq.mutable_gpu_data); // \Sum (a_i - b_i)^2
                }

                // Set all ignore labels to the float maximum value.
                foreach (int nIgnoreLabel in m_rgIgnoreLabels)
                {
                    m_blobDistSq.SetData(float.MaxValue, nIgnoreLabel);
                }

                // The distances are returned in top[0], where the smallest distance indicates the detected label.
                m_cuda.copy(m_blobDistSq.num, m_blobDistSq.gpu_data, colTop[0].mutable_gpu_data, 0, i * m_blobDistSq.num);
            }

            // If we are to output the centroids, only do so when they are complete; otherwise output 0's.
            if (colTop.Count > 1)
            {
                colTop[1].ReshapeLike(m_colBlobs[0]);

                int nCompletedCentroids = (int)convertD(m_colBlobs[1].asum_data());
                if (m_nIteration >= m_nCentroidOutputIteration && nCompletedCentroids == nActiveLabels)
                {
                    m_cuda.copy(m_colBlobs[0].count(), m_colBlobs[0].gpu_data, colTop[1].mutable_gpu_data);
                }
                else
                {
                    if (m_phase != Phase.TRAIN)
                        m_log.WriteLine("WARNING: The centroids for the decode layer are not yet complete! You must train the model first to calculate the centroids.");

                    colTop[1].SetData(0);
                }
            }

            m_nIteration++;
        }
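
        // Interpreting top[0] (a hypothetical caller-side sketch, not part of the layer): each row
        // of top[0] holds one squared distance per known label, so the predicted label for item 'i'
        // is the index of the smallest value in its row, e.g.:
        //
        //   float[] rgDist = MyCaffe.basecode.Utility.ConvertVecF<T>(colTop[0].mutable_cpu_data);
        //   int nLabels = colTop[0].channels;
        //   int nPredicted = 0;
        //   for (int j = 1; j < nLabels; j++)
        //   {
        //       if (rgDist[i * nLabels + j] < rgDist[i * nLabels + nPredicted])
        //           nPredicted = j;
        //   }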

        /// <summary>
        /// Not implemented; the DecodeLayer cannot be used as a loss layer, so the backward pass does nothing.
        /// </summary>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        /// <param name="rgbPropagateDown">Specifies whether to propagate down to each bottom Blob.</param>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            // do nothing.
        }
    }
}
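
Example use (a minimal sketch, assuming the standard MyCaffe layer factory Layer<T>.Create and an existing CudaDnn<float> 'cuda' and Log 'log'; the parameter values shown are illustrative, not defaults):

    LayerParameter p = new LayerParameter(LayerParameter.LayerType.DECODE);
    p.decode_param.target = MyCaffe.param.beta.DecodeParameter.TARGET.CENTROID;
    p.decode_param.cache_size = 100;

    Layer<float> decode = Layer<float>.Create(cuda, log, p, null);
    decode.Setup(colBottom, colTop);    // colBottom = {encodings, labels} during TRAIN.
    decode.Forward(colBottom, colTop);  // colTop[0] receives one squared distance per label.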