MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
EuclideanLossLayer.cs
1using System;
2using System.Collections.Generic;
3using System.Linq;
4using System.Text;
5using MyCaffe.basecode;
6using MyCaffe.common;
7using MyCaffe.param;
8
9namespace MyCaffe.layers
10{
33 public class EuclideanLossLayer<T> : LossLayer<T>
34 {
35 Blob<T> m_blobDiff;
36
44 : base(cuda, log, p)
45 {
46 m_type = LayerParameter.LayerType.EUCLIDEAN_LOSS;
47 m_blobDiff = new Blob<T>(cuda, log);
48 m_blobDiff.Name = m_param.name + " diff";
49 }
50
52 protected override void dispose()
53 {
54 m_blobDiff.Dispose();
55 base.dispose();
56 }
57
64 public override bool AllowForceBackward(int nBottomIdx)
65 {
66 return true;
67 }
68
74 public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
75 {
76 base.LayerSetUp(colBottom, colTop);
77
78 // Setup the convert to half flags used by the Layer just before calling forward and backward.
80 m_bConvertTopOnBwd = false;
81 }
82
88 public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
89 {
90 base.Reshape(colBottom, colTop);
91
92 m_log.CHECK_EQ(colBottom[0].count(1), colBottom[1].count(1), "Inputs must have the same dimension.");
93 m_blobDiff.ReshapeLike(colBottom[0], colBottom[0].HalfSize);
94 }
95
111 protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
112 {
113 int nCount = colBottom[0].count();
114 long hData = m_blobDiff.gpu_data;
115
116 m_cuda.sub(nCount, colBottom[0].gpu_data, colBottom[1].gpu_data, hData);
117
118 if (m_blobDiff.HalfSize)
119 hData = convert_to_full(nCount, hData);
120
121 T fDot = m_cuda.dot(nCount, hData, hData);
122 double dfLoss = convertD(fDot) / colBottom[0].num / 2.0;
123
124 colTop[0].SetData(dfLoss, 0);
125 }
126
161 protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
162 {
163 for (int i = 0; i < 2; i++)
164 {
165 if (rgbPropagateDown[i])
166 {
167 double dfSign = (i == 0) ? 1 : -1;
168 double dfTopDiff = convertD(colTop[0].GetDiff(0));
169 double dfAlpha = dfSign * dfTopDiff / colBottom[i].num;
170 int nCount = colBottom[i].count();
171
172 m_cuda.axpby(nCount, convert(dfAlpha), m_blobDiff.gpu_data, m_tZero, colBottom[i].mutable_gpu_diff);
173 }
174 }
175 }
176 }
177}
The Log class provides general output in text form.
Definition: Log.cs:13
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
Definition: Log.cs:239
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
The Blob is the main holder of data that moves through the Layers of the Net.
Definition: Blob.cs:25
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
The EuclideanLossLayer computes the Euclidean (L2) loss for real-valued regression tasks.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the Euclidean error gradient w.r.t. the inputs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
override bool AllowForceBackward(int nBottomIdx)
Unlike most loss layers, in the EuclideanLossLayer we can backpropagate to both inputs – override to return true for either bottom index.
EuclideanLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The EuclideanLossLayer constructor
override void dispose()
Releases all GPU and host resources used by the Layer.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
Log m_log
Specifies the Log for output.
Definition: Layer.cs:43
long convert_to_full(int nCount, long hMem)
Convert half memory to full memory.
Definition: Layer.cs:514
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
Definition: Layer.cs:535
T m_tZero
Specifies a generic type equal to 0.0.
Definition: Layer.cs:76
bool m_bConvertTopOnBwd
Specifies whether or not to convert the top on the backward pass when using half sized memory (typica...
Definition: Layer.cs:92
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
Definition: Layer.cs:84
double convertD(T df)
Converts a generic to a double value.
Definition: Layer.cs:1349
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The LossLayer provides an interface for Layers that take two blobs as input – usually (1) prediction...
Definition: LossLayer.cs:23
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
bool use_halfsize
Specifies whether or not to use half sized memory or not.
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...
Definition: Annotation.cs:12