MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
TrainerPG.cs
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.fillers;
using MyCaffe.layers;
using MyCaffe.param;
using MyCaffe.solvers;

{
    /// <summary>
    /// The TrainerPG implements a simple Policy Gradient trainer inspired by Andrej Karpathy's blog post on deep reinforcement learning.
    /// </summary>
    public class TrainerPG<T> : IxTrainerRL, IDisposable
    {
        IxTrainerCallback m_icallback;
        CryptoRandom m_random = new CryptoRandom();
        MyCaffeControl<T> m_mycaffe;
        PropertySet m_properties;

        /// <summary>
        /// The constructor.
        /// </summary>
        public TrainerPG(MyCaffeControl<T> mycaffe, PropertySet properties, CryptoRandom random, IxTrainerCallback icallback)
        {
            m_icallback = icallback;
            m_mycaffe = mycaffe;
            m_properties = properties;
            m_random = random;
        }

        /// <summary>
        /// Releases all resources used.
        /// </summary>
        public void Dispose()
        {
        }

        /// <summary>
        /// Initialize the trainer.
        /// </summary>
        public bool Initialize()
        {
            m_mycaffe.CancelEvent.Reset();
            m_icallback.OnInitialize(new InitializeArgs(m_mycaffe));
            return true;
        }

        private void wait(int nWait)
        {
            int nWaitInc = 250;
            int nTotalWait = 0;

            while (nTotalWait < nWait)
            {
                m_icallback.OnWait(new WaitArgs(nWaitInc));
                nTotalWait += nWaitInc;
            }
        }

        /// <summary>
        /// Shutdown the trainer.
        /// </summary>
        public bool Shutdown(int nWait)
        {
            if (m_mycaffe != null)
            {
                m_mycaffe.CancelEvent.Set();
                wait(nWait);
            }

            m_icallback.OnShutdown();

            return true;
        }

        /// <summary>
        /// Run a single cycle on the environment after the delay.
        /// </summary>
        public ResultCollection RunOne(int nDelay = 1000)
        {
            m_mycaffe.CancelEvent.Reset();
            Agent<T> agent = new Agent<T>(m_icallback, m_mycaffe, m_properties, m_random, Phase.TRAIN);
            agent.Run(Phase.TEST, 1, ITERATOR_TYPE.ITERATION, TRAIN_STEP.NONE);
            agent.Dispose();
            return null;
        }

        /// <summary>
        /// Run a set of iterations and return the results.
        /// </summary>
        public byte[] Run(int nN, PropertySet runProp, out string type)
        {
            m_mycaffe.CancelEvent.Reset();
            Agent<T> agent = new Agent<T>(m_icallback, m_mycaffe, m_properties, m_random, Phase.RUN);
            byte[] rgResults = agent.Run(nN, out type);
            agent.Dispose();

            return rgResults;
        }

        /// <summary>
        /// Run the test cycle.
        /// </summary>
        public bool Test(int nN, ITERATOR_TYPE type)
        {
            int nDelay = 1000;
            string strProp = m_properties.ToString();

            // Turn off the num-skip to run at normal speed.
            strProp += "EnableNumSkip=False;";
            PropertySet properties = new PropertySet(strProp);

            m_mycaffe.CancelEvent.Reset();
            Agent<T> agent = new Agent<T>(m_icallback, m_mycaffe, properties, m_random, Phase.TRAIN);
            agent.Run(Phase.TEST, nN, type, TRAIN_STEP.NONE);

            agent.Dispose();
            Shutdown(nDelay);

            return true;
        }

        /// <summary>
        /// Train the network using a modified PG training algorithm optimized for GPU use.
        /// </summary>
        public bool Train(int nN, ITERATOR_TYPE type, TRAIN_STEP step)
        {
            m_mycaffe.CancelEvent.Reset();
            Agent<T> agent = new Agent<T>(m_icallback, m_mycaffe, m_properties, m_random, Phase.TRAIN);
            agent.Run(Phase.TRAIN, nN, type, step);
            agent.Dispose();

            return false;
        }
    }
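
    // A minimal usage sketch (hypothetical host code, not part of this file): the callback
    // type, property values, and iteration counts below are illustrative assumptions; only
    // the TrainerPG members shown above are taken from this source.
    //
    //   IxTrainerCallback icallback = new MyGymCallback();   // hypothetical callback implementation
    //   PropertySet props = new PropertySet("Gamma=0.99;MiniBatch=10;");
    //   TrainerPG<float> trainer = new TrainerPG<float>(mycaffe, props, new CryptoRandom(), icallback);
    //   trainer.Initialize();
    //   trainer.Train(1000, ITERATOR_TYPE.EPISODE, TRAIN_STEP.NONE);  // train for 1000 episodes
    //   trainer.Shutdown(1000);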

    class Agent<T> : IDisposable
    {
        IxTrainerCallback m_icallback;
        Brain<T> m_brain;
        PropertySet m_properties;
        CryptoRandom m_random;
        float m_fGamma;
        bool m_bAllowDiscountReset = false;
        bool m_bUseRawInput = false;

        public Agent(IxTrainerCallback icallback, MyCaffeControl<T> mycaffe, PropertySet properties, CryptoRandom random, Phase phase)
        {
            m_icallback = icallback;
            m_brain = new Brain<T>(mycaffe, properties, random, phase);
            m_properties = properties;
            m_random = random;

            m_fGamma = (float)properties.GetPropertyAsDouble("Gamma", 0.99);
            m_bAllowDiscountReset = properties.GetPropertyAsBool("AllowDiscountReset", false);
            m_bUseRawInput = properties.GetPropertyAsBool("UseRawInput", false);
        }

        public void Dispose()
        {
            if (m_brain != null)
            {
                m_brain.Dispose();
                m_brain = null;
            }
        }

        private StateBase getData(Phase phase, int nAction)
        {
            GetDataArgs args = m_brain.getDataArgs(phase, nAction);
            m_icallback.OnGetData(args);
            return args.State;
        }

        private void updateStatus(int nIteration, int nEpisodeCount, double dfRewardSum, double dfRunningReward)
        {
            GetStatusArgs args = new GetStatusArgs(0, nIteration, nEpisodeCount, 1000000, dfRunningReward, dfRewardSum, 0, 0, 0, 0);
            m_icallback.OnUpdateStatus(args);
        }

        public byte[] Run(int nIterations, out string type)
        {
            IxTrainerCallbackRNN icallback = m_icallback as IxTrainerCallbackRNN;
            if (icallback == null)
                throw new Exception("The Run method requires an IxTrainerCallbackRNN interface to convert the results into the native format!");

            StateBase s = getData(Phase.RUN, -1);
            int nIteration = 0;
            List<float> rgResults = new List<float>();

            while (!m_brain.Cancel.WaitOne(0) && (nIterations == -1 || nIteration < nIterations))
            {
                // Preprocess the observation.
                SimpleDatum x = m_brain.Preprocess(s, m_bUseRawInput);

                // Forward the policy network and sample an action.
                float[] rgfAprob;
                int action = m_brain.act(x, s.Clip, out rgfAprob);

                rgResults.Add(s.Data.TimeStamp.ToFileTime());
                rgResults.Add((float)s.Data.GetDataAtF(0));
                rgResults.Add(action);

                // Take the next step using the action (advance the state for the next pass).
                s = getData(Phase.RUN, action);
                nIteration++;
            }

            ConvertOutputArgs args = new ConvertOutputArgs(nIterations, rgResults.ToArray());
            icallback.OnConvertOutput(args);

            type = args.RawType;
            return args.RawOutput;
        }

        private bool isAtIteration(int nN, ITERATOR_TYPE type, int nIteration, int nEpisode)
        {
            if (nN == -1)
                return false;

            if (type == ITERATOR_TYPE.EPISODE)
            {
                if (nEpisode < nN)
                    return false;

                return true;
            }
            else
            {
                if (nIteration < nN)
                    return false;

                return true;
            }
        }

        public void Run(Phase phase, int nN, ITERATOR_TYPE type, TRAIN_STEP step)
        {
            MemoryCollection rgMemory = new MemoryCollection();
            double? dfRunningReward = null;
            double dfEpisodeReward = 0;
            int nEpisode = 0;
            int nIteration = 0;

            StateBase s = getData(phase, -1);

            while (!m_brain.Cancel.WaitOne(0) && !isAtIteration(nN, type, nIteration, nEpisode))
            {
                // Preprocess the observation.
                SimpleDatum x = m_brain.Preprocess(s, m_bUseRawInput);

                // Forward the policy network and sample an action.
                float[] rgfAprob;
                int action = m_brain.act(x, s.Clip, out rgfAprob);

                if (step == TRAIN_STEP.FORWARD)
                    return;

                // Take the next step using the action.
                StateBase s_ = getData(phase, action);
                dfEpisodeReward += s_.Reward;

                if (phase == Phase.TRAIN)
                {
                    // Build up the episode memory, using the reward received for taking the action.
                    rgMemory.Add(new MemoryItem(s, x, action, rgfAprob, (float)s_.Reward));

                    // An episode has finished.
                    if (s_.Done)
                    {
                        nEpisode++;
                        nIteration++;

                        m_brain.Reshape(rgMemory);

                        // Compute the discounted reward (backwards through time).
                        float[] rgDiscountedR = rgMemory.GetDiscountedRewards(m_fGamma, m_bAllowDiscountReset);
                        // The rewards are standardized to be unit normal when set (helps control the gradient estimator variance).
                        m_brain.SetDiscountedR(rgDiscountedR);

                        // Get the action probabilities.
                        float[] rgfAprobSet = rgMemory.GetActionProbabilities();
                        // The action probabilities are used to calculate the initial gradient within the loss function.
                        m_brain.SetActionProbabilities(rgfAprobSet);

                        // Get the action one-hot vectors. When using Softmax, each action set contains the one-hot
                        // vector for the action taken (e.g. with 3 actions, action 0 returns the vector <1,0,0>).
                        // When using a binary probability (e.g. with Sigmoid), each action set contains a
                        // single element which is set to the action value itself (e.g. 0 for action '0' and 1 for action '1').
                        float[] rgfAonehotSet = rgMemory.GetActionOneHotVectors();
                        m_brain.SetActionOneHotVectors(rgfAonehotSet);

                        // Train for one iteration, which triggers the loss function.
                        List<Datum> rgData = rgMemory.GetData();
                        List<Datum> rgClip = rgMemory.GetClip();
                        m_brain.SetData(rgData, rgClip);
                        m_brain.Train(nIteration, step);

                        // Update the running reward.
                        if (!dfRunningReward.HasValue)
                            dfRunningReward = dfEpisodeReward;
                        else
                            dfRunningReward = dfRunningReward * 0.99 + dfEpisodeReward * 0.01;

                        updateStatus(nIteration, nEpisode, dfEpisodeReward, dfRunningReward.Value);
                        dfEpisodeReward = 0;

                        s = getData(phase, -1);
                        rgMemory.Clear();

                        if (step != TRAIN_STEP.NONE)
                            return;
                    }
                    else
                    {
                        s = s_;
                    }
                }
                else
                {
                    if (s_.Done)
                    {
                        nEpisode++;

                        // Update the running reward.
                        if (!dfRunningReward.HasValue)
                            dfRunningReward = dfEpisodeReward;
                        else
                            dfRunningReward = dfRunningReward * 0.99 + dfEpisodeReward * 0.01;

                        updateStatus(nIteration, nEpisode, dfEpisodeReward, dfRunningReward.Value);
                        dfEpisodeReward = 0;

                        s = getData(phase, -1);
                    }
                    else
                    {
                        s = s_;
                    }

                    nIteration++;
                }
            }
        }
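
        // Note: the running reward above is an exponential moving average. For example
        // (hypothetical values), with a previous running reward of 10.0 and an episode
        // reward of 20.0, the update gives 0.99 * 10.0 + 0.01 * 20.0 = 10.1.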
    }

    class Brain<T> : IDisposable
    {
        MyCaffeControl<T> m_mycaffe;
        Net<T> m_net;
        Solver<T> m_solver;
        MemoryDataLayer<T> m_memData;
        MemoryLossLayer<T> m_memLoss;
        SoftmaxLayer<T> m_softmax = null;
        SoftmaxCrossEntropyLossLayer<T> m_softmaxCe = null;
        bool m_bSoftmaxCeSetup = false;
        PropertySet m_properties;
        CryptoRandom m_random;
        BlobCollection<T> m_colAccumulatedGradients = new BlobCollection<T>();
        Blob<T> m_blobDiscountedR;
        Blob<T> m_blobPolicyGradient;
        Blob<T> m_blobActionOneHot;
        Blob<T> m_blobDiscountedR1;
        Blob<T> m_blobPolicyGradient1;
        Blob<T> m_blobActionOneHot1;
        Blob<T> m_blobLoss;
        Blob<T> m_blobAprobLogit;
        bool m_bSkipLoss;
        int m_nMiniBatch = 10;
        SimpleDatum m_sdLast = null;
        int m_nRecurrentSequenceLength = 0;
        List<Datum> m_rgData = null;
        List<Datum> m_rgClip = null;

        public Brain(MyCaffeControl<T> mycaffe, PropertySet properties, CryptoRandom random, Phase phase)
        {
            m_mycaffe = mycaffe;
            m_net = mycaffe.GetInternalNet(phase);
            m_solver = mycaffe.GetInternalSolver();
            m_properties = properties;
            m_random = random;

            m_memData = m_net.FindLayer(LayerParameter.LayerType.MEMORYDATA, null) as MemoryDataLayer<T>;
            m_memLoss = m_net.FindLayer(LayerParameter.LayerType.MEMORY_LOSS, null) as MemoryLossLayer<T>;
            m_softmax = m_net.FindLayer(LayerParameter.LayerType.SOFTMAX, null) as SoftmaxLayer<T>;

            if (m_memData == null)
                throw new Exception("Could not find the MemoryData Layer!");

            if (m_memLoss == null)
                throw new Exception("Could not find the MemoryLoss Layer!");

            m_memData.OnDataPack += memData_OnDataPack;
            m_memLoss.OnGetLoss += memLoss_OnGetLoss;

            m_blobDiscountedR = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobPolicyGradient = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobActionOneHot = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobDiscountedR1 = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobPolicyGradient1 = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobActionOneHot1 = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobLoss = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobAprobLogit = new Blob<T>(mycaffe.Cuda, mycaffe.Log);

            if (m_softmax != null)
            {
                LayerParameter p = new LayerParameter(LayerParameter.LayerType.SOFTMAXCROSSENTROPY_LOSS);
                p.loss_weight.Add(1);
                p.loss_weight.Add(0);
                p.loss_param.normalization = LossParameter.NormalizationMode.NONE;
                m_softmaxCe = new SoftmaxCrossEntropyLossLayer<T>(mycaffe.Cuda, mycaffe.Log, p);
            }

            m_colAccumulatedGradients = m_net.learnable_parameters.Clone();
            m_colAccumulatedGradients.SetDiff(0);

            int nMiniBatch = mycaffe.CurrentProject.GetBatchSize(phase);
            if (nMiniBatch != 0)
                m_nMiniBatch = nMiniBatch;

            m_nMiniBatch = m_properties.GetPropertyAsInt("MiniBatch", m_nMiniBatch);
        }

        private void dispose(ref Blob<T> b)
        {
            if (b != null)
            {
                b.Dispose();
                b = null;
            }
        }

        public void Dispose()
        {
            m_memLoss.OnGetLoss -= memLoss_OnGetLoss;
            dispose(ref m_blobDiscountedR);
            dispose(ref m_blobPolicyGradient);
            dispose(ref m_blobActionOneHot);
            dispose(ref m_blobDiscountedR1);
            dispose(ref m_blobPolicyGradient1);
            dispose(ref m_blobActionOneHot1);
            dispose(ref m_blobLoss);
            dispose(ref m_blobAprobLogit);

            if (m_colAccumulatedGradients != null)
            {
                m_colAccumulatedGradients.Dispose();
                m_colAccumulatedGradients = null;
            }
        }

        public int RecurrentSequenceLength
        {
            get { return m_nRecurrentSequenceLength; }
        }

        public int Reshape(MemoryCollection col)
        {
            int nNum = col.Count;
            int nChannels = col[0].Data.Channels;
            int nHeight = col[0].Data.Height;
            int nWidth = col[0].Data.Width;
            int nActionProbs = 1;
            int nFound = 0;

            for (int i = 0; i < m_net.output_blobs.Count; i++)
            {
                if (m_net.output_blobs[i].type != BLOB_TYPE.LOSS)
                {
                    int nCh = m_net.output_blobs[i].channels;
                    nActionProbs = Math.Max(nCh, nActionProbs);
                    nFound++;
                }
            }

            if (nFound == 0)
                throw new Exception("Could not find a non-loss output! Your model should output the loss and the action probabilities.");

            m_blobDiscountedR.Reshape(nNum, nActionProbs, 1, 1);
            m_blobPolicyGradient.Reshape(nNum, nActionProbs, 1, 1);
            m_blobActionOneHot.Reshape(nNum, nActionProbs, 1, 1);
            m_blobDiscountedR1.Reshape(nNum, nActionProbs, 1, 1);
            m_blobPolicyGradient1.Reshape(nNum, nActionProbs, 1, 1);
            m_blobActionOneHot1.Reshape(nNum, nActionProbs, 1, 1);
            m_blobLoss.Reshape(1, 1, 1, 1);

            return nActionProbs;
        }

        public void SetDiscountedR(float[] rg)
        {
            double dfMean = m_blobDiscountedR.mean(rg);
            double dfStd = m_blobDiscountedR.std(dfMean, rg);
            int nC = m_blobDiscountedR.channels;

            // Fill all items in each channel with the same discount value.
            if (nC > 1)
            {
                List<float> rgR = new List<float>();

                for (int i = 0; i < rg.Length; i++)
                {
                    for (int j = 0; j < nC; j++)
                    {
                        rgR.Add(rg[i]);
                    }
                }

                rg = rgR.ToArray();
            }

            m_blobDiscountedR.SetData(Utility.ConvertVec<T>(rg));
            m_blobDiscountedR.NormalizeData(dfMean, dfStd);
        }
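
        // Standardization example (hypothetical values, assuming a population standard
        // deviation): NormalizeData subtracts the mean and divides by the std, so discounted
        // rewards { 1.0, 0.5, 0.0 } (mean 0.5, std ~0.41) end up roughly { 1.22, 0.0, -1.22 },
        // a zero-mean, unit-variance advantage signal.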

        public void SetActionProbabilities(float[] rg)
        {
            m_blobPolicyGradient.SetData(Utility.ConvertVec<T>(rg));
        }

        public void SetActionOneHotVectors(float[] rg)
        {
            m_blobActionOneHot.SetData(Utility.ConvertVec<T>(rg));
        }

        public void SetData(List<Datum> rgData, List<Datum> rgClip)
        {
            if (m_nRecurrentSequenceLength != 1 && rgData.Count > 1 && rgClip != null)
            {
                m_rgData = rgData;
                m_rgClip = rgClip;
            }
            else
            {
                m_memData.AddDatumVector(rgData, rgClip, 1, true, true);
                m_rgData = null;
                m_rgClip = null;
            }
        }

        public GetDataArgs getDataArgs(Phase phase, int nAction)
        {
            bool bReset = (nAction == -1);
            return new GetDataArgs(phase, 0, m_mycaffe, m_mycaffe.Log, m_mycaffe.CancelEvent, bReset, nAction, true);
        }

        public Log Log
        {
            get { return m_mycaffe.Log; }
        }

        public CancelEvent Cancel
        {
            get { return m_mycaffe.CancelEvent; }
        }

        public SimpleDatum Preprocess(StateBase s, bool bUseRawInput)
        {
            SimpleDatum sd = new SimpleDatum(s.Data, true);

            if (bUseRawInput)
                return sd;

            if (m_sdLast == null)
                sd.Zero();
            else
                sd.Sub(m_sdLast);

            m_sdLast = s.Data;

            return sd;
        }

        public int act(SimpleDatum sd, SimpleDatum sdClip, out float[] rgfAprob)
        {
            List<Datum> rgData = new List<Datum>();
            rgData.Add(new Datum(sd));
            double dfLoss;
            float fRandom = (float)m_random.NextDouble(); // Roll the dice.
            List<Datum> rgClip = null;

            if (sdClip != null)
            {
                rgClip = new List<Datum>();
                rgClip.Add(new Datum(sdClip));
            }

            m_memData.AddDatumVector(rgData, rgClip, 1, true, true);
            m_bSkipLoss = true;
            BlobCollection<T> res = m_net.Forward(out dfLoss);
            m_bSkipLoss = false;

            rgfAprob = null;

            for (int i = 0; i < res.Count; i++)
            {
                if (res[i].type != BLOB_TYPE.LOSS)
                {
                    int nStart = 0;
                    // When using recurrent learning, only act on the last outputs.
                    if (m_nRecurrentSequenceLength > 1 && res[i].num > 1)
                    {
                        int nCount = res[i].count();
                        int nOutput = nCount / res[i].num;
                        nStart = nCount - nOutput;

                        if (nStart < 0)
                            throw new Exception("The start must be zero or greater!");
                    }

                    rgfAprob = Utility.ConvertVecF<T>(res[i].update_cpu_data(), nStart);
                    break;
                }
            }

            if (rgfAprob == null)
                throw new Exception("Could not find a non-loss output! Your model should output the loss and the action probabilities.");

            // Select the action from the probability distribution.
            float fSum = 0;
            for (int i = 0; i < rgfAprob.Length; i++)
            {
                fSum += rgfAprob[i];

                if (fRandom < fSum)
                    return i;
            }

            if (rgfAprob.Length == 1)
                return 1;

            return rgfAprob.Length - 1;
        }
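
        // Sampling example (hypothetical values): with rgfAprob = { 0.2, 0.5, 0.3 } and
        // fRandom = 0.65, the cumulative sums are 0.2 then 0.7; 0.65 < 0.7 first holds at
        // index 1, so action 1 is chosen. A single Sigmoid output (e.g. rgfAprob = { 0.2 })
        // yields action 0 when fRandom < 0.2 and action 1 otherwise.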

        private void prepareBlob(Blob<T> b1, Blob<T> b)
        {
            b1.CopyFrom(b, 0, 0, b1.count(), true, true);
            b.Reshape(1, b.channels, b.height, b.width);
        }

        private void copyBlob(int nIdx, Blob<T> src, Blob<T> dst)
        {
            int nCount = dst.count();
            dst.CopyFrom(src, nIdx * nCount, 0, nCount, true, false);
        }

        public void Train(int nIteration, TRAIN_STEP step)
        {
            m_mycaffe.Log.Enable = false;

            // Run data/clip groups > 1 in non-batch mode.
            if (m_nRecurrentSequenceLength != 1 && m_rgData != null && m_rgData.Count > 1 && m_rgClip != null)
            {
                prepareBlob(m_blobActionOneHot1, m_blobActionOneHot);
                prepareBlob(m_blobDiscountedR1, m_blobDiscountedR);
                prepareBlob(m_blobPolicyGradient1, m_blobPolicyGradient);

                for (int i = 0; i < m_rgData.Count; i++)
                {
                    copyBlob(i, m_blobActionOneHot1, m_blobActionOneHot);
                    copyBlob(i, m_blobDiscountedR1, m_blobDiscountedR);
                    copyBlob(i, m_blobPolicyGradient1, m_blobPolicyGradient);

                    List<Datum> rgData1 = new List<Datum>() { m_rgData[i] };
                    List<Datum> rgClip1 = new List<Datum>() { m_rgClip[i] };

                    m_memData.AddDatumVector(rgData1, rgClip1, 1, true, true);

                    m_solver.Step(1, step, true, false, true, true);
                }

                m_blobActionOneHot.ReshapeLike(m_blobActionOneHot1);
                m_blobDiscountedR.ReshapeLike(m_blobDiscountedR1);
                m_blobPolicyGradient.ReshapeLike(m_blobPolicyGradient1);

                m_rgData = null;
                m_rgClip = null;
            }
            else
            {
                m_solver.Step(1, step, true, false, true, true);
            }

            m_colAccumulatedGradients.Accumulate(m_mycaffe.Cuda, m_net.learnable_parameters, true);

            if (nIteration % m_nMiniBatch == 0 || step == TRAIN_STEP.BACKWARD || step == TRAIN_STEP.BOTH)
            {
                m_net.learnable_parameters.CopyFrom(m_colAccumulatedGradients, true);
                m_colAccumulatedGradients.SetDiff(0);
                m_solver.ApplyUpdate(nIteration);
                m_net.ClearParamDiffs();
            }

            m_mycaffe.Log.Enable = true;
        }
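
        // Accumulation note: each call to Train runs one solver step (Step is called with
        // bApplyUpdates = false) and folds the resulting diffs into m_colAccumulatedGradients.
        // With the default MiniBatch of 10, the accumulated gradients are copied back and
        // ApplyUpdate runs only on every 10th episode, giving a mini-batch of episodes rather
        // than of frames.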

        private T[] unpackLabel(Datum d)
        {
            if (d.DataCriteria == null)
                return null;

            if (d.DataCriteriaFormat == SimpleDatum.DATA_FORMAT.LIST_FLOAT)
            {
                List<float> rgf = BinaryData.UnPackFloatList(d.DataCriteria, SimpleDatum.DATA_FORMAT.LIST_FLOAT);
                return Utility.ConvertVec<T>(rgf.ToArray());
            }
            else if (d.DataCriteriaFormat == SimpleDatum.DATA_FORMAT.LIST_DOUBLE)
            {
                List<double> rgf = BinaryData.UnPackDoubleList(d.DataCriteria, SimpleDatum.DATA_FORMAT.LIST_DOUBLE);
                return Utility.ConvertVec<T>(rgf.ToArray());
            }

            return null;
        }

        private void memData_OnDataPack(object sender, MemoryDataLayerPackDataArgs<T> e)
        {
            List<int> rgDataShape = e.Data.shape();
            List<int> rgClipShape = e.Clip.shape();
            List<int> rgLabelShape = e.Label.shape();
            int nBatch = e.DataItems.Count;
            int nSeqLen = rgDataShape[0];

            e.Data.Log.CHECK_GT(nSeqLen, 0, "The sequence length must be greater than zero!");
            e.Data.Log.CHECK_EQ(nBatch, e.ClipItems.Count, "The data and clip should have the same number of items.");
            e.Data.Log.CHECK_EQ(nSeqLen, rgClipShape[0], "The data and clip should have the same sequence count.");

            rgDataShape[1] = nBatch; // LSTM uses sizing: seq, batch, data1, data2
            rgClipShape[1] = nBatch;
            rgLabelShape[1] = nBatch;

            e.Data.Reshape(rgDataShape);
            e.Clip.Reshape(rgClipShape);
            e.Label.Reshape(rgLabelShape);

            T[] rgRawData = new T[e.Data.count()];
            T[] rgRawClip = new T[e.Clip.count()];
            T[] rgRawLabel = new T[e.Label.count()];

            int nDataSize = e.Data.count(2);
            T[] rgDataItem = new T[nDataSize];
            T dfClip;
            int nIdx;

            for (int i = 0; i < nBatch; i++)
            {
                Datum data = e.DataItems[i];
                Datum clip = e.ClipItems[i];

                T[] rgLabel = unpackLabel(data);

                for (int j = 0; j < nSeqLen; j++)
                {
                    dfClip = clip.GetDataAt<T>(j);

                    for (int k = 0; k < nDataSize; k++)
                    {
                        rgDataItem[k] = data.GetDataAt<T>(j * nDataSize + k);
                    }

                    // LSTM: Create the input data; the data must be in the order
                    // seq1_val1, seq2_val1, ..., seqBatchSize_val1, seq1_val2, seq2_val2, ..., seqBatchSize_valSequenceLength
                    if (e.LstmType == LayerParameter.LayerType.LSTM)
                        nIdx = nBatch * j + i;

                    // LSTM_SIMPLE: Create the input data; the data must be in the order
                    // seq1_val1, seq1_val2, ..., seq1_valBatchSize, seq2_val1, seq2_val2, ..., seqSequenceLength_valBatchSize
                    else
                        nIdx = i * nBatch + j;

                    Array.Copy(rgDataItem, 0, rgRawData, nIdx * nDataSize, nDataSize);
                    rgRawClip[nIdx] = dfClip;

                    if (rgLabel != null)
                    {
                        if (rgLabel.Length == nSeqLen)
                            rgRawLabel[nIdx] = rgLabel[j];
                        else if (rgLabel.Length == 1)
                        {
                            if (j == nSeqLen - 1)
                                rgRawLabel[0] = rgLabel[0];
                        }
                        else
                        {
                            throw new Exception("The Solver SequenceLength parameter does not match the actual sequence length! The label length '" + rgLabel.Length.ToString() + "' must be either '1' for SINGLE labels, or the sequence length of '" + nSeqLen.ToString() + "' for MULTI labels. Stopping training.");
                        }
                    }
                }
            }

            e.Data.mutable_cpu_data = rgRawData;
            e.Clip.mutable_cpu_data = rgRawClip;
            e.Label.mutable_cpu_data = rgRawLabel;
            m_nRecurrentSequenceLength = nSeqLen;
        }
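
        // Ordering example (hypothetical values): with nBatch = 2 and nSeqLen = 3, the LSTM
        // branch (nIdx = nBatch * j + i) lays the data out time-major as
        // [b0t0, b1t0, b0t1, b1t1, b0t2, b1t2], i.e. all batch items for time step 0 first,
        // matching the seq, batch sizing the Reshape calls above set up.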

        private void memLoss_OnGetLoss(object sender, MemoryLossLayerGetLossArgs<T> e)
        {
            if (m_bSkipLoss)
                return;

            int nCount = m_blobPolicyGradient.count();
            long hActionOneHot = m_blobActionOneHot.gpu_data;
            long hPolicyGrad = m_blobPolicyGradient.mutable_gpu_data;
            long hDiscountedR = m_blobDiscountedR.gpu_data;
            double dfLoss;
            Blob<T> blobOriginalBottom = e.Bottom[0];
            int nDataSize = e.Bottom[0].count(1);
            bool bUsingEndData = false;

            // When using a recurrent model and receiving data with more than one sequence,
            // copy and only use the last sequence data.
            if (m_nRecurrentSequenceLength > 1)
            {
                if (e.Bottom[0].num > 1)
                {
                    m_blobAprobLogit.CopyFrom(e.Bottom[0], false, true);
                    m_blobAprobLogit.CopyFrom(e.Bottom[0], true);

                    List<int> rgShape = e.Bottom[0].shape();
                    rgShape[0] = 1;
                    e.Bottom[0].Reshape(rgShape);
                    e.Bottom[0].CopyFrom(m_blobAprobLogit, (m_blobAprobLogit.num - 1) * nDataSize, 0, nDataSize, true, true);
                    bUsingEndData = true;
                }
            }

            long hBottomDiff = e.Bottom[0].mutable_gpu_diff;

            // Calculate the initial gradients (the policy gradient initially just contains the action probabilities).
            if (m_softmax != null)
            {
                BlobCollection<T> colBottom = new BlobCollection<T>();
                BlobCollection<T> colTop = new BlobCollection<T>();

                colBottom.Add(e.Bottom[0]);        // aprob logits
                colBottom.Add(m_blobActionOneHot); // action one-hot vectors
                colTop.Add(m_blobLoss);
                colTop.Add(m_blobPolicyGradient);

                if (!m_bSoftmaxCeSetup)
                {
                    m_softmaxCe.Setup(colBottom, colTop);
                    m_bSoftmaxCeSetup = true;
                }

                dfLoss = m_softmaxCe.Forward(colBottom, colTop);
                m_softmaxCe.Backward(colTop, new List<bool>() { true, false }, colBottom);
                hPolicyGrad = colBottom[0].gpu_diff;
            }
            else
            {
                // Calculate (a=0) ? 1-aprob : 0-aprob
                m_mycaffe.Cuda.add_scalar(nCount, -1.0, hActionOneHot); // invert the one-hot
                m_mycaffe.Cuda.abs(nCount, hActionOneHot, hActionOneHot);
                m_mycaffe.Cuda.mul_scalar(nCount, -1.0, hPolicyGrad);   // negate aprob
                m_mycaffe.Cuda.add(nCount, hActionOneHot, hPolicyGrad, hPolicyGrad); // gradient = ((a=0)?1:0) - aprob
                dfLoss = Utility.ConvertVal<T>(m_blobPolicyGradient.sumsq_data());

                m_mycaffe.Cuda.mul_scalar(nCount, -1.0, hPolicyGrad);   // invert, because ApplyUpdate subtracts the gradients
            }

            // Modulate the gradient with the advantage (the PG magic happens right here).
            m_mycaffe.Cuda.mul(nCount, hPolicyGrad, hDiscountedR, hPolicyGrad);

            e.Loss = dfLoss;
            e.EnableLossUpdate = false; // apply the gradients to the bottom directly.

            if (hPolicyGrad != hBottomDiff)
                m_mycaffe.Cuda.copy(nCount, hPolicyGrad, hBottomDiff);

            // When using a recurrent model with more than one sequence of data, only
            // copy the diff to the last item in the sequence and zero out the rest.
            if (m_nRecurrentSequenceLength > 1 && bUsingEndData)
            {
                m_blobAprobLogit.SetDiff(0);
                m_blobAprobLogit.CopyFrom(e.Bottom[0], 0, (m_blobAprobLogit.num - 1) * nDataSize, nDataSize, false, true);
                e.Bottom[0].CopyFrom(m_blobAprobLogit, false, true);
                e.Bottom[0].CopyFrom(m_blobAprobLogit, true);
            }
        }
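
        // Gradient sketch: on the Softmax path, the SoftmaxCrossEntropyLoss backward pass
        // yields dL/dlogit = p - y (predicted probability minus the target indicator); the
        // Sigmoid path builds the same p - y form by hand with the CUDA arithmetic above.
        // Multiplying by the standardized discounted return R then gives the REINFORCE-style
        // estimator (p - y) * R, which ApplyUpdate later subtracts from the weights.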
    }

    class MemoryCollection : GenericList<MemoryItem>
    {
        public MemoryCollection()
        {
        }

        public float[] GetDiscountedRewards(float fGamma, bool bAllowReset)
        {
            float fRunningAdd = 0;
            float[] rgR = m_rgItems.Select(p => p.Reward).ToArray();
            float[] rgDiscountedR = new float[rgR.Length];

            for (int t = Count - 1; t >= 0; t--)
            {
                if (bAllowReset && rgR[t] != 0)
                    fRunningAdd = 0;

                fRunningAdd = fRunningAdd * fGamma + rgR[t];
                rgDiscountedR[t] = fRunningAdd;
            }

            return rgDiscountedR;
        }
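
        // Worked example (hypothetical values): with fGamma = 0.99 and rewards { 0, 0, 1 },
        // stepping backwards gives t=2: 1.0, t=1: 0.99, t=0: 0.9801, so the method returns
        // { 0.9801, 0.99, 1.0 }. With bAllowReset = true, a non-zero reward (an episode
        // boundary marker, as in Pong) resets the running sum before that step is added.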

        public float[] GetActionProbabilities()
        {
            List<float> rgfAprob = new List<float>();

            for (int i = 0; i < m_rgItems.Count; i++)
            {
                rgfAprob.AddRange(m_rgItems[i].Aprob);
            }

            return rgfAprob.ToArray();
        }

        public float[] GetActionOneHotVectors()
        {
            List<float> rgfAonehot = new List<float>();

            for (int i = 0; i < m_rgItems.Count; i++)
            {
                float[] rgfOneHot = new float[m_rgItems[0].Aprob.Length];

                if (rgfOneHot.Length == 1)
                    rgfOneHot[0] = m_rgItems[i].Action;
                else
                    rgfOneHot[m_rgItems[i].Action] = 1;

                rgfAonehot.AddRange(rgfOneHot);
            }

            return rgfAonehot.ToArray();
        }

        public List<Datum> GetData()
        {
            List<Datum> rgData = new List<Datum>();

            for (int i = 0; i < m_rgItems.Count; i++)
            {
                rgData.Add(new Datum(m_rgItems[i].Data));
            }

            return rgData;
        }

        public List<Datum> GetClip()
        {
            if (m_rgItems.Count == 0)
                return null;

            if (m_rgItems[0].State.Clip == null)
                return null;

            List<Datum> rgData = new List<Datum>();

            for (int i = 0; i < m_rgItems.Count; i++)
            {
                if (m_rgItems[i].State.Clip == null)
                    return null;

                rgData.Add(new Datum(m_rgItems[i].State.Clip));
            }

            return rgData;
        }
    }

    class MemoryItem
    {
        StateBase m_state;
        SimpleDatum m_x;
        int m_nAction;
        float[] m_rgfAprob;
        float m_fReward;

        public MemoryItem(StateBase s, SimpleDatum x, int nAction, float[] rgfAprob, float fReward)
        {
            m_state = s;
            m_x = x;
            m_nAction = nAction;
            m_rgfAprob = rgfAprob;
            m_fReward = fReward;
        }

        public StateBase State
        {
            get { return m_state; }
        }

        public SimpleDatum Data
        {
            get { return m_x; }
        }

        public int Action
        {
            get { return m_nAction; }
        }

        public float Reward
        {
            get { return m_fReward; }
        }

        public float[] Aprob
        {
            get { return m_rgfAprob; }
        }

        public override string ToString()
        {
            return "action = " + m_nAction.ToString() + " reward = " + m_fReward.ToString("N2") + " aprob = " + tostring(m_rgfAprob);
        }

        private string tostring(float[] rg)
        {
            string str = "{";

            for (int i = 0; i < rg.Length; i++)
            {
                str += rg[i].ToString("N5");
                str += ",";
            }

            str = str.TrimEnd(',');
            str += "}";

            return str;
        }
    }
}