MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
MemoryLossLayer.cs
The MemoryLossLayer provides a method of performing custom loss functionality via the OnGetLoss event, which fires during each forward pass; the value returned is saved and applied on the backward pass.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers
{
    /// <summary>
    /// The MemoryLossLayer provides a method of performing custom loss functionality.
    /// </summary>
    public class MemoryLossLayer<T> : LossLayer<T>
    {
        object m_userState = null;
        bool m_bWarningMade = false;
        bool m_bEnableLoss = true;

        /// <summary>
        /// The OnGetLoss event fires during each forward pass. The value returned is saved, and applied on the backward pass.
        /// </summary>
        public event EventHandler<MemoryLossLayerGetLossArgs<T>> OnGetLoss;

        /// <summary>
        /// The constructor.
        /// </summary>
        public MemoryLossLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.MEMORY_LOSS;
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            base.dispose();
        }

        /// <summary>
        /// Optionally specifies a user-state that is passed to the OnGetLoss event.
        /// </summary>
        public object user_state
        {
            get { return m_userState; }
            set { m_userState = value; }
        }

        /// <summary>
        /// Returns the exact number of required bottom (input) Blobs as variable.
        /// </summary>
        public override int ExactNumBottomBlobs
        {
            get { return -1; }
        }

        /// <summary>
        /// Returns the minimum number of required bottom (input) Blobs: input 1.
        /// </summary>
        public override int MinBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the maximum number of required bottom (input) Blobs: input 1 and 2.
        /// </summary>
        public override int MaxBottomBlobs
        {
            get { return 2; }
        }

        /// <summary>
        /// Returns the exact number of required top (output) Blobs: loss.
        /// </summary>
        public override int ExactNumTopBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Setup the layer.
        /// </summary>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            base.LayerSetUp(colBottom, colTop);

            // The listing elided these branch bodies; the standard MyCaffe loss-layer
            // pattern is restored here: fall back to the deprecated 'normalize' flag
            // when no normalization mode is specified.
            if (!m_param.loss_param.normalization.HasValue)
                m_normalization = (m_param.loss_param.normalize) ? LossParameter.NormalizationMode.VALID : LossParameter.NormalizationMode.BATCH_SIZE;
            else
                m_normalization = m_param.loss_param.normalization.Value;

            m_bWarningMade = false;
        }

        /// <summary>
        /// Reshape the bottom (input) and top (output) blobs.
        /// </summary>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            bool bUniformSize = true;
            int nAxis = colBottom[0].CanonicalAxisIndex(1);
            int nCount = colBottom[0].count(nAxis);

            for (int i = 1; i < colBottom.Count; i++)
            {
                int nCount1 = colBottom[i].count(nAxis);
                if (nCount1 != nCount)
                {
                    bUniformSize = false;
                    break;
                }
            }

            if (!bUniformSize)
            {
                if (!m_bWarningMade)
                {
                    m_log.WriteLine("WARNING: The MemoryLossLayer bottoms are not of uniform size, so the normalization will be set to NONE.");
                    m_bWarningMade = true;
                }

                // Restored from the warning above; the listing elided this assignment.
                m_normalization = LossParameter.NormalizationMode.NONE;
                m_nOuterNum = 0;
                m_nInnerNum = 0;
            }
            else
            {
                m_nOuterNum = colBottom[0].count(0, nAxis);
                m_nInnerNum = colBottom[0].count(nAxis + 1);
            }

            List<int> rgLossShape = new List<int>(); // Loss layers output a scalar; 0 axes.
            colTop[0].Reshape(rgLossShape);
            colTop[0].type = BLOB_TYPE.LOSS;
        }

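        // How the event-driven loss works: forward() fires OnGetLoss so an external
        // handler can compute the loss from the bottom blobs, then stores the
        // normalized loss in the top blob. On the backward pass, when the handler
        // left EnableLossUpdate == true, each bottom diff is seeded with its data
        // scaled by that loss; otherwise the diffs already set by the handler are
        // kept and scaled by the loss weight divided by the normalizer.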
        /// <summary>
        /// The forward computation.
        /// </summary>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            if (OnGetLoss == null)
                m_log.FAIL("The OnGetLoss event must be implemented. Make sure the SolverParameter 'custom_trainer' points to a trainer that connects the OnGetLoss event.");

            double dfNormalizer = get_normalizer(m_normalization, -1);
            MemoryLossLayerGetLossArgs<T> e = new MemoryLossLayerGetLossArgs<T>(colBottom, m_userState, dfNormalizer);
            OnGetLoss(this, e);

            m_bEnableLoss = e.EnableLossUpdate;
            colTop[0].SetData(e.Loss / dfNormalizer, 0);
        }

        /// <summary>
        /// Backpropagates the previously acquired (within the forward pass) loss error gradient.
        /// </summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (!rgbPropagateDown[0])
                return;

            double dfNormalizer = get_normalizer(m_normalization, -1);

            // Multiply the loss by the loss weight (in top[0].diff).
            if (m_bEnableLoss)
            {
                double dfLoss = convertD(colTop[0].GetData(0));

                for (int i = 0; i < colBottom.Count; i++)
                {
                    m_cuda.copy(colBottom[i].count(), colBottom[i].gpu_data, colBottom[i].mutable_gpu_diff);
                    m_cuda.mul_scalar(colBottom[i].count(), dfLoss, colBottom[i].mutable_gpu_diff);
                }

                dfNormalizer = 1.0;
            }

            double dfTopDiff = convertD(colTop[0].GetDiff(0)); // loss weight
            double dfLossWeight = dfTopDiff / dfNormalizer;

            // Apply the loss weight to the bottom diffs.
            if (dfLossWeight != 1.0)
            {
                for (int i = 0; i < colBottom.Count; i++)
                {
                    m_cuda.scal(colBottom[i].count(), convert(dfLossWeight), colBottom[i].mutable_gpu_diff);
                }
            }
        }
    }

    /// <summary>
    /// The MemoryLossLayerGetLossArgs class is passed to the OnGetLoss event.
    /// </summary>
    public class MemoryLossLayerGetLossArgs<T> : EventArgs
    {
        object m_userState = null;
        double m_dfLoss = 0;
        double m_dfNormalizer = 1;
        BlobCollection<T> m_colBottom;
        bool m_bEnableLossUpdate = true;
        object m_tag = null;

        /// <summary>
        /// The constructor.
        /// </summary>
        public MemoryLossLayerGetLossArgs(BlobCollection<T> colBottom, object userState, double dfNormalizer)
        {
            m_userState = userState;
            m_colBottom = colBottom;
            m_dfNormalizer = dfNormalizer; // Added: the original left dfNormalizer unused, so Normalizer always read 1.
        }

        /// <summary>
        /// Specifies a user-state.
        /// </summary>
        public object user_state
        {
            get { return m_userState; }
        }

        /// <summary>
        /// Specifies the bottom blobs passed in during the forward pass.
        /// </summary>
        public BlobCollection<T> Bottom
        {
            get { return m_colBottom; }
        }

        /// <summary>
        /// Specifies the normalizer.
        /// </summary>
        public double Normalizer
        {
            get { return m_dfNormalizer; }
        }

        /// <summary>
        /// Get/set the externally calculated total loss.
        /// </summary>
        public double Loss
        {
            get { return m_dfLoss; }
            set { m_dfLoss = value; }
        }

        /// <summary>
        /// Get/set enabling the loss update within the backpropagation pass.
        /// </summary>
        public bool EnableLossUpdate
        {
            get { return m_bEnableLossUpdate; }
            set { m_bEnableLossUpdate = value; }
        }

        /// <summary>
        /// Get/set a user defined value.
        /// </summary>
        public object Tag
        {
            get { return m_tag; }
            set { m_tag = value; }
        }
    }
}
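
Usage sketch. The fragment below shows how a trainer might connect the OnGetLoss event; it is a minimal illustration, not MyCaffe sample code. Obtaining the MemoryLossLayer<float> instance (here simply named 'layer') and the mean-of-squares loss are assumptions, as is reading the blob via mutable_cpu_data; only OnGetLoss and the event-args members (Bottom, Loss, EnableLossUpdate) come from the listing above.

    // Attach a custom loss callback to an existing MemoryLossLayer<float>
    // instance (how 'layer' is obtained is assumed, e.g. located within a Net).
    layer.OnGetLoss += (sender, e) =>
    {
        // e.Bottom holds the bottom blobs passed in during the forward pass;
        // mutable_cpu_data is assumed here to read the first blob on the host.
        float[] rgPred = e.Bottom[0].mutable_cpu_data;

        // Illustrative placeholder loss: mean of squared activations.
        double dfSum = 0;
        for (int i = 0; i < rgPred.Length; i++)
            dfSum += rgPred[i] * rgPred[i];
        e.Loss = dfSum / rgPred.Length;

        // Leave EnableLossUpdate true so backward() seeds each bottom diff
        // with its data scaled by this loss before applying the loss weight.
        e.EnableLossUpdate = true;
    };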