MyCaffe 1.12.2.41
Deep learning software for Windows C# programmers.
LogLayer.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers
{
    /// <summary>
    /// The LogLayer computes the log of the input. This layer is initialized
    /// with the MyCaffe.param.LogParameter.
    /// </summary>
    public class LogLayer<T> : NeuronLayer<T>
    {
        double m_dfBaseScale;
        double m_dfInputScale;
        double m_dfInputShift;
        double m_dfBackwardNumScale;

        /// <summary>
        /// The LogLayer constructor.
        /// </summary>
        public LogLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.LOG;
        }

        /// <summary>
        /// Setup the layer.
        /// </summary>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            base.LayerSetUp(colBottom, colTop);

            double dfBase = m_param.log_param.base_val;

            if (dfBase != -1)
                m_log.CHECK_GT(dfBase, 0, "base_val must be strictly positive.");

            // If base == -1, interpret the base as e and set log_base = 1 exactly.
            // Otherwise, calculate its log explicitly.
            double dfLogBase = (dfBase == -1) ? 1 : Math.Log(dfBase);

            m_log.CHECK(!double.IsNaN(dfLogBase), "NaN result: log(base) == log(" + dfBase.ToString() + ") = " + dfLogBase.ToString());
            m_log.CHECK(!double.IsInfinity(dfLogBase), "Inf result: log(base) == log(" + dfBase.ToString() + ") = " + dfLogBase.ToString());

            m_dfBaseScale = 1.0 / dfLogBase;

            m_log.CHECK(!double.IsNaN(m_dfBaseScale), "NaN result: 1/log(base) == 1/log(" + dfBase.ToString() + ") = " + m_dfBaseScale.ToString());
            m_log.CHECK(!double.IsInfinity(m_dfBaseScale), "Inf result: 1/log(base) == 1/log(" + dfBase.ToString() + ") = " + m_dfBaseScale.ToString());

            m_dfInputScale = m_param.log_param.scale;
            m_dfInputShift = m_param.log_param.shift;
            m_dfBackwardNumScale = m_dfInputScale / dfLogBase;
        }
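
        // The two cached constants above follow from the change-of-base rule:
        //   y = log_base(scale * x + shift) = ln(scale * x + shift) / ln(base)
        // m_dfBaseScale = 1 / ln(base) is the factor applied after the natural log in forward(),
        // and m_dfBackwardNumScale = scale / ln(base) is the constant numerator of the derivative
        //   dy/dx = scale / (ln(base) * (scale * x + shift))
        // used by backward(). When base_val == -1 the base is e, ln(base) == 1, and m_dfBaseScale == 1,
        // so the extra scaling step in forward() is skipped.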

        /// <summary>
        /// Forward computation.
        /// </summary>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            int nCount = colBottom[0].count();
            long hBottomData = colBottom[0].gpu_data;
            long hTopData = colTop[0].mutable_gpu_data;

            if (m_dfInputScale == 1.0 && m_dfInputShift == 0)
            {
                m_cuda.log(nCount, hBottomData, hTopData);
            }
            else
            {
                m_cuda.copy(nCount, hBottomData, hTopData);

                if (m_dfInputScale != 1)
                    m_cuda.scal(nCount, convert(m_dfInputScale), hTopData);

                if (m_dfInputShift != 0)
                    m_cuda.add_scalar(nCount, convert(m_dfInputShift), hTopData);

                m_cuda.log(nCount, hTopData, hTopData);
            }

            if (m_dfBaseScale != 1)
                m_cuda.scal(nCount, convert(m_dfBaseScale), hTopData);
        }
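
        // Element-wise, forward() computes top[i] = log(scale * bottom[i] + shift) / log(base):
        // the fast path calls m_cuda.log directly when scale == 1 and shift == 0, and the final
        // m_cuda.scal by m_dfBaseScale is skipped when the base is e. For example, with
        // base_val = 10, scale = 1, shift = 1 and bottom[i] = 99, top[i] = log10(100) = 2.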

        /// <summary>
        /// Computes the error gradient w.r.t. the LOG value inputs.
        /// </summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (!rgbPropagateDown[0])
                return;

            int nCount = colBottom[0].count();
            long hBottomData = colBottom[0].gpu_data;
            long hTopDiff = colTop[0].gpu_diff;
            long hBottomDiff = colBottom[0].mutable_gpu_diff;

            m_cuda.copy(nCount, hBottomData, hBottomDiff);

            if (m_dfInputScale != 1.0)
                m_cuda.scal(nCount, convert(m_dfInputScale), hBottomDiff);

            if (m_dfInputShift != 0)
                m_cuda.add_scalar(nCount, convert(m_dfInputShift), hBottomDiff);

            m_cuda.powx(nCount, hBottomDiff, convert(-1.0), hBottomDiff);

            if (m_dfBackwardNumScale != 1.0)
                m_cuda.scal(nCount, convert(m_dfBackwardNumScale), hBottomDiff);

            m_cuda.mul(nCount, hTopDiff, hBottomDiff, hBottomDiff);
        }
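
        // backward() builds the gradient in place on bottom_diff by the chain rule:
        //   copy:       x
        //   scal, add:  scale * x + shift
        //   powx(-1):   1 / (scale * x + shift)
        //   scal:       scale / (ln(base) * (scale * x + shift))    // m_dfBackwardNumScale
        //   mul:        top_diff * scale / (ln(base) * (scale * x + shift))
        // which is dy/dx for y = log_base(scale * x + shift), multiplied by the top gradient.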
    }
}
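
The layer is configured entirely through its LogParameter (base_val, scale, shift). The sketch below shows one plausible way to construct a LogLayer<float> that computes y = log10(1 + x); it assumes the common MyCaffe pattern of a LayerParameter(LayerType) constructor and treats the cuda and log arguments as a CudaDnn<float> connection and Log instance created elsewhere, so read it as an illustration rather than verbatim MyCaffe sample code.

    using MyCaffe.basecode;
    using MyCaffe.common;
    using MyCaffe.layers;
    using MyCaffe.param;

    public static class LogLayerExample
    {
        // Returns a LogLayer<float> configured to compute y = log10(1 + x).
        // 'cuda' and 'log' are assumed to be created by the caller.
        public static LogLayer<float> CreateLog10Layer(CudaDnn<float> cuda, Log log)
        {
            LayerParameter p = new LayerParameter(LayerParameter.LayerType.LOG);
            p.log_param.base_val = 10;  // log base 10
            p.log_param.scale = 1;      // leave the input unscaled
            p.log_param.shift = 1;      // shift by 1 so x = 0 maps to y = 0

            return new LogLayer<float>(cuda, log, p);
        }
    }

From there the layer is set up and run against its bottom/top BlobCollections through the normal Layer<T> calls, which invoke the LayerSetUp, forward, and backward overrides shown above.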
The Log class provides general output in text form.
Definition: Log.cs:13
void CHECK(bool b, string str)
Test a flag for true.
Definition: Log.cs:227
void CHECK_GT(double df1, double df2, string str)
Test whether one number is greater than another.
Definition: Log.cs:299
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
Log m_log
Specifies the Log for output.
Definition: Layer.cs:43
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
Definition: Layer.cs:535
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The LogLayer computes the log of the input. This layer is initialized with the MyCaffe.param.LogParameter.
Definition: LogLayer.cs:22
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
Definition: LogLayer.cs:51
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the LOG value inputs.
Definition: LogLayer.cs:123
LogLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The LogLayer constructor.
Definition: LogLayer.cs:40
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
Definition: LogLayer.cs:86
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-sized outputs (y).
Definition: NeuronLayer.cs:22
Specifies the base parameter for all layers.
LogParameter log_param
Returns the parameter set when initialized with LayerType.LOG
LayerType
Specifies the layer type.
double shift
Specifies the shift to use for the log, where y = log_base(shift + scale * x), for base > 0.
Definition: LogParameter.cs:61
double base_val
Specifies the base to use for the log, where y = log_base(shift + scale * x), for base > 0.
Definition: LogParameter.cs:41
double scale
Specifies the scale to use for the log, where y = log_base(shift + scale * x), for base > 0.
Definition: LogParameter.cs:51
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.
Definition: Annotation.cs:12