MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
SmoothL1LossLayer.cs
1using System;
2using System.Collections.Generic;
3using System.Linq;
4using System.Text;
5using MyCaffe.basecode;
6using MyCaffe.common;
7using MyCaffe.param;
8
9namespace MyCaffe.layers.ssd
10{
29 public class SmoothL1LossLayer<T> : LossLayer<T>
30 {
        // Caches the element-wise difference between bottom(0) and bottom(1)
        // (scaled by the weights in bottom(2) when present).
        Blob<T> m_blobDiff;
        // Holds the per-element smooth-L1 error values computed from m_blobDiff.
        Blob<T> m_blobErrors;
        // Set to true when a third bottom blob supplying per-element weights is present.
        bool m_bHasWeights;
34
48 : base(cuda, log, p)
49 {
50 m_type = LayerParameter.LayerType.SMOOTHL1_LOSS;
51
52 m_blobDiff = new Blob<T>(cuda, log, false);
53 m_blobDiff.Name = m_param.name + " diff";
54 m_blobErrors = new Blob<T>(cuda, log, false);
55 m_blobErrors.Name = m_param.name + " errors";
56
57 m_bHasWeights = false;
58 }
59
61 protected override void dispose()
62 {
63 if (m_blobDiff != null)
64 {
65 m_blobDiff.Dispose();
66 m_blobDiff = null;
67 }
68
69 if (m_blobErrors != null)
70 {
71 m_blobErrors.Dispose();
72 m_blobErrors = null;
73 }
74
75 base.dispose();
76 }
77
79 protected override void setup_internal_blobs(BlobCollection<T> col)
80 {
81 if (col.Count > 0)
82 return;
83
84 col.Add(m_blobDiff);
85 col.Add(m_blobErrors);
86 }
87
        /// <summary>
        /// Returns -1, indicating that the layer does not require an exact number of
        /// bottom (input) Blobs; the accepted range is given by MinBottomBlobs and
        /// MaxBottomBlobs instead.
        /// </summary>
        public override int ExactNumBottomBlobs
        {
            get { return -1; }
        }
95
        /// <summary>
        /// Returns the minimum number of required bottom (input) Blobs: predictions, targets.
        /// </summary>
        public override int MinBottomBlobs
        {
            get { return 2; }
        }
103
        /// <summary>
        /// Returns the maximum number of accepted bottom (input) Blobs: predictions,
        /// targets, and optional per-element weights.
        /// </summary>
        public override int MaxBottomBlobs
        {
            get { return 3; }
        }
111
        /// <summary>
        /// Unlike most loss layers, the SmoothL1LossLayer can backpropagate to both of
        /// its inputs, so force-backward is allowed for every bottom index.
        /// </summary>
        /// <param name="nBottomIdx">Specifies the index of the bottom blob.</param>
        /// <returns>Always returns <i>true</i>.</returns>
        public override bool AllowForceBackward(int nBottomIdx)
        {
            return true;
        }
122
128 public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
129 {
130 base.LayerSetUp(colBottom, colTop);
131
132 if (colBottom.Count == 3)
133 m_bHasWeights = true;
134 }
135
141 public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
142 {
143 base.Reshape(colBottom, colTop);
144
145 m_log.CHECK_EQ(colBottom[0].channels, colBottom[1].channels, "The bottom(0) and bottom(1) must have the same channels.");
146 m_log.CHECK_EQ(colBottom[0].height, colBottom[1].height, "The bottom(0) and bottom(1) must have the same height.");
147 m_log.CHECK_EQ(colBottom[0].width, colBottom[1].width, "The bottom(0) and bottom(1) must have the same width.");
148
149 if (m_bHasWeights)
150 {
151 m_log.CHECK_EQ(colBottom[0].channels, colBottom[2].channels, "The bottom(0) and bottom(2) must have the same channels.");
152 m_log.CHECK_EQ(colBottom[0].height, colBottom[2].height, "The bottom(0) and bottom(2) must have the same height.");
153 m_log.CHECK_EQ(colBottom[0].width, colBottom[2].width, "The bottom(0) and bottom(2) must have the same width.");
154 }
155
156 m_blobDiff.ReshapeLike(colBottom[0]);
157 m_blobErrors.ReshapeLike(colBottom[0]);
158 }
159
178 protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
179 {
180 int nCount = colBottom[0].count();
181
182 // d := b0 - b1
183 m_cuda.sub(nCount, colBottom[0].gpu_data, colBottom[1].gpu_data, m_blobDiff.mutable_gpu_data);
184
185 // d := w * (b0 - b1)
186 if (m_bHasWeights)
187 m_cuda.mul(nCount, colBottom[2].gpu_data, m_blobDiff.gpu_data, m_blobDiff.mutable_gpu_data);
188
189 m_cuda.smoothl1_fwd(nCount, m_blobDiff.gpu_data, m_blobErrors.mutable_gpu_data);
190
191 double dfLoss = Utility.ConvertVal<T>(m_blobErrors.asum_data());
192 colTop[0].SetData(dfLoss / colBottom[0].num, 0);
193 }
194
225 protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
226 {
227 int nCount = m_blobDiff.count();
228
229 m_cuda.smoothl1_bwd(nCount, m_blobDiff.gpu_data, m_blobDiff.mutable_gpu_data);
230
231 for (int i = 0; i < 2; i++)
232 {
233 if (rgbPropagateDown[i])
234 {
235 double dfSign = (i == 0) ? 1 : -1;
236 double dfAlpha = Utility.ConvertVal<T>(colTop[0].GetDiff(0));
237
238 dfAlpha = dfSign * dfAlpha / colBottom[i].num;
239 m_cuda.axpby(colBottom[i].count(), dfAlpha, m_blobDiff.gpu_data, 0.0, colBottom[i].mutable_gpu_diff);
240 }
241 }
242 }
243 }
244}
The Log class provides general output in text form.
Definition: Log.cs:13
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
Definition: Log.cs:239
The Utility class provides general utility functions.
Definition: Utility.cs:35
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
The Blob is the main holder of data that moves through the Layers of the Net.
Definition: Blob.cs:25
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
Definition: Blob.cs:1487
T asum_data()
Compute the sum of absolute values (L1 norm) of the data.
Definition: Blob.cs:1706
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
Definition: Blob.cs:648
string Name
Get/set the name of the Blob.
Definition: Blob.cs:2184
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
Definition: Blob.cs:402
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
Log m_log
Specifies the Log for output.
Definition: Layer.cs:43
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The LossLayer provides an interface for Layer's that take two blobs as input – usually (1) prediction...
Definition: LossLayer.cs:23
Fast R-CNN Copyright (c) Microsoft Licensed under The MIT License [see fast-rcnn/LICENSE for details]...
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the smooth L1 loss error gradient w.r.t the predictions.
SmoothL1LossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs, or -1 indicating a variable number.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: predictions, targets.
override int MaxBottomBlobs
Returns the maximum number of accepted bottom (input) Blobs: predictions, targets, weights.
override bool AllowForceBackward(int nBottomIdx)
Unlike most loss layers, in the SmoothL1LossLayer we can backpropagate to both inputs – override to r...
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers.ssd namespace contains all Single-Shot MultiBox (SSD) related layers.
Definition: LayerFactory.cs:19
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...
Definition: Annotation.cs:12