MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
NumericTransformationLayer.cs
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers.tft
{
    public class NumericTransformationLayer<T> : Layer<T>
    {
        List<Layer<T>> m_rgIpLayers = new List<Layer<T>>();
        BlobCollection<T> m_rgBtm = new BlobCollection<T>();
        BlobCollection<T> m_rgIpBtm = new BlobCollection<T>();
        BlobCollection<T> m_rgIpTop = new BlobCollection<T>();

        public NumericTransformationLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.NUMERIC_TRANS;
        }

        protected override void dispose()
        {
            if (m_rgIpLayers != null)
            {
                foreach (Layer<T> layer in m_rgIpLayers)
                {
                    layer.Dispose();
                }
                m_rgIpLayers = null;
            }

            if (m_rgBtm != null)
            {
                m_rgBtm.Dispose();
                m_rgBtm = null;
            }
        }

        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;
        }

        public override int ExactNumBottomBlobs
        {
            get { return 1; }
        }

        public override int ExactNumTopBlobs
        {
            get { return (int)m_param.numeric_trans_param.num_input; }
        }

        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            int nOffset = (colBottom[0].num_axes == 2) ? 1 : 2;
            int nDim = colBottom[0].count(0, nOffset);
            int nNumInput = (int)m_param.numeric_trans_param.num_input;
            int nCount = colBottom[0].count(nOffset);
            int nSpatialDim = nCount / nNumInput;
            List<int> rgShape = new List<int>() { nDim, nSpatialDim };
            Blob<T> blobBtm = null;

            m_rgIpBtm.Clear();
            m_rgIpBtm.Add(blobBtm);
            m_rgIpTop.Clear();
            m_rgIpTop.Add(colTop[0]);

            // Create one inner-product (embedding) layer per numeric input.
            for (int i = 0; i < nNumInput; i++)
            {
                blobBtm = new Blob<T>(m_cuda, m_log);
                blobBtm.Reshape(rgShape);
                m_rgBtm.Add(blobBtm);

                m_rgIpBtm[0] = m_rgBtm[i];
                m_rgIpTop[0] = colTop[i];

                LayerParameter p = new LayerParameter(LayerParameter.LayerType.INNERPRODUCT, m_param.name + ".ip" + i.ToString());
                p.inner_product_param.num_output = m_param.numeric_trans_param.state_size;
                p.inner_product_param.axis = 1;

                Layer<T> ip_layer = Layer<T>.Create(m_cuda, m_log, convertLayerParam(p, m_param), null);
                m_rgIpLayers.Add(ip_layer);

                ip_layer.LayerSetUp(m_rgIpBtm, m_rgIpTop);
                blobs.Add(ip_layer.blobs);
            }
        }

        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            for (int i = 0; i < m_param.numeric_trans_param.num_input; i++)
            {
                m_rgIpBtm[0] = m_rgBtm[i];
                m_rgIpTop[0] = colTop[i];
                m_rgIpLayers[i].Reshape(m_rgIpBtm, m_rgIpTop);
            }
        }

        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            for (int i = 0; i < m_param.numeric_trans_param.num_input; i++)
            {
                // Copy the i'th numeric column out of the single bottom blob ...
                int nCount = m_rgBtm[i].count();
                m_cuda.channel_copy(nCount, nCount, 1, (int)m_param.numeric_trans_param.num_input, 1, i, colBottom[0].gpu_data, m_rgBtm[i].mutable_gpu_data, DIR.FWD);

                // ... and run it through its own inner-product (embedding) layer.
                m_rgIpBtm[0] = m_rgBtm[i];
                m_rgIpTop[0] = colTop[i];
                m_rgIpLayers[i].Forward(m_rgIpBtm, m_rgIpTop);
            }
        }

        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            for (int i = 0; i < m_param.numeric_trans_param.num_input; i++)
            {
                m_rgIpBtm[0] = m_rgBtm[i];
                m_rgIpTop[0] = colTop[i];
                m_rgIpLayers[i].Backward(m_rgIpTop, rgbPropagateDown, m_rgIpBtm);

                // data fields do not have gradients so no gradients are output.
                //int nCount = m_rgIpBtm[0].count();
                //m_cuda.channel_copy(nCount, nCount, 1, (int)m_param.numeric_trans_param.num_input, 1, i, colBottom[0].mutable_gpu_diff, m_rgIpBtm[0].gpu_diff, DIR.BWD);
            }
        }
    }
}
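To make the data movement in forward() concrete, the stand-alone sketch below mimics on the CPU what the m_cuda.channel_copy call does on the GPU: the single bottom blob, laid out as [outer, num_input] (with outer = batch, or batch x time for 3-axis input, as computed in LayerSetUp), is split into num_input column vectors, and each column then feeds its own inner-product (embedding) layer. This snippet is not part of the MyCaffe sources; its class and method names are made up for the illustration.

using System;

public static class NumericSliceSketch
{
    // CPU-side equivalent of the channel_copy call in forward():
    // split a flattened [nOuter, nNumInput] buffer into nNumInput
    // separate [nOuter] column vectors.
    public static float[][] Split(float[] rgBottom, int nOuter, int nNumInput)
    {
        float[][] rgSlices = new float[nNumInput][];

        for (int i = 0; i < nNumInput; i++)
        {
            rgSlices[i] = new float[nOuter];

            for (int n = 0; n < nOuter; n++)
                rgSlices[i][n] = rgBottom[n * nNumInput + i];
        }

        return rgSlices;
    }

    public static void Main()
    {
        // Two samples, three numeric inputs per sample.
        float[] rgBottom = new float[] { 1, 2, 3, 10, 20, 30 };
        float[][] rgSlices = Split(rgBottom, 2, 3);

        // Slice 0 = { 1, 10 }, slice 1 = { 2, 20 }, slice 2 = { 3, 30 };
        // in the layer, each slice is then projected to the embedding width
        // by its own inner-product layer.
        for (int i = 0; i < rgSlices.Length; i++)
            Console.WriteLine("slice " + i + ": " + string.Join(", ", rgSlices[i]));
    }
}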
The Log class provides general output in text form.
Definition: Log.cs:13
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Clear(bool bDispose=false)
Remove all items from the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
Definition: Blob.cs:25
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use
Definition: Blob.cs:442
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
An interface for the units of computation which can be composed into a Net.
Definition: Layer.cs:31
Log m_log
Specifies the Log for output.
Definition: Layer.cs:43
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
abstract void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Performs Layer specific setup. Derived layers should override this function as well as the Reshape fu...
void Dispose()
Releases all GPU and host resources used by the Layer.
Definition: Layer.cs:180
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
static Layer< T > Create(CudaDnn< T > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db=null, TransferInput trxinput=null)
Create a new Layer based on the LayerParameter.
Definition: Layer.cs:1468
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
BlobCollection< T > blobs
Returns the collection of learnable parameter Blobs for the Layer.
Definition: Layer.cs:875
LayerParameter convertLayerParam(LayerParameter pChild, LayerParameter pParent)
Called to convert a parent LayerParameterEx, used in blob sharing, with a child layer parameter.
Definition: Layer.cs:1134
The NumericTransformationLayer implements the transforming/embeddings for the set of numeric input va...
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: one per numeric input (numeric_trans_param.num_input).
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the numeric value inputs.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: data
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all internal blobs to the 'col' provided.
NumericTransformationLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The constructor.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
int axis
Specifies the first axis to be lumped into a single inner product computation; all preceding axes are...
uint num_output
The number of outputs for the layer.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
NumericTransformationParameter numeric_trans_param
Returns the parameter set when initialized with LayerType.NUMERIC_TRANS
InnerProductParameter inner_product_param
Returns the parameter set when initialized with LayerType.INNERPRODUCT
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
DIR
Defines the direction of data flow.
Definition: CudaDnn.cs:22
The MyCaffe.layers.tft namespace contains all TFT related layers.
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...
Definition: Annotation.cs:12
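For readers who want to see the layer in context, the following stand-alone sketch shows one way it could be exercised directly, based on the members documented above. It assumes the NumericTransformationParameter exposes a state_size (embedding width) property alongside num_input, that a CancelEvent may be passed to Layer<T>.Create, and that Setup/Forward follow the usual MyCaffe Layer calling pattern; treat it as an illustration of the expected blob counts and shapes rather than verbatim MyCaffe usage.

using System.Collections.Generic;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.layers;
using MyCaffe.param;

// Illustrative sketch only; names outside this file (state_size) and the
// exact Create/Setup calling pattern are assumptions.
CudaDnn<double> cuda = new CudaDnn<double>(0);
Log log = new Log("numeric_trans_sample");

LayerParameter p = new LayerParameter(LayerParameter.LayerType.NUMERIC_TRANS, "numtrans");
p.numeric_trans_param.num_input = 4;     // four numeric input columns
p.numeric_trans_param.state_size = 64;   // assumed embedding width per input

Layer<double> layer = Layer<double>.Create(cuda, log, p, new CancelEvent());

// One bottom blob shaped [batch, num_input] ...
Blob<double> blobBottom = new Blob<double>(cuda, log);
blobBottom.Reshape(new List<int>() { 256, 4 });
blobBottom.SetData(1.0);

BlobCollection<double> colBottom = new BlobCollection<double>();
colBottom.Add(blobBottom);

// ... and num_input top blobs; the layer reshapes each one to [batch, state_size].
BlobCollection<double> colTop = new BlobCollection<double>();
for (int i = 0; i < (int)p.numeric_trans_param.num_input; i++)
    colTop.Add(new Blob<double>(cuda, log));

layer.Setup(colBottom, colTop);
layer.Forward(colBottom, colTop);

// Release GPU resources.
layer.Dispose();
colTop.Dispose();
blobBottom.Dispose();
cuda.Dispose();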