MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
TransposeLayer.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;
using MyCaffe.param.beta;

namespace MyCaffe.layers.beta
{
    /// <summary>The TransposeLayer performs a permute and transpose operation similar to numpy.transpose.</summary>
    public class TransposeLayer<T> : Layer<T>
    {
        Blob<T> m_blobBottomCounts;
        Blob<T> m_blobTopCounts;
        Blob<T> m_blobForwardMap;
        Blob<T> m_blobBackwardMap;
        Blob<T> m_blobBuffer;
        bool m_bForceReshape = false;
        List<int> m_rgShape = new List<int>(4);

        /// <summary>The TransposeLayer constructor.</summary>
        public TransposeLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.TRANSPOSE;

            m_blobBottomCounts = new Blob<T>(cuda, log, false);
            m_blobBottomCounts.Name = m_param.name + " bottom counts";
            m_blobTopCounts = new Blob<T>(cuda, log, false);
            m_blobTopCounts.Name = m_param.name + " top counts";
            m_blobForwardMap = new Blob<T>(cuda, log, false);
            m_blobForwardMap.Name = m_param.name + " forward map";
            m_blobBackwardMap = new Blob<T>(cuda, log, false);
            m_blobBackwardMap.Name = m_param.name + " backward map";
            m_blobBuffer = new Blob<T>(cuda, log, false);
            m_blobBuffer.Name = m_param.name + " buffer";

            setup_internal_blobs(m_colInternalBlobs);
        }

        /// <summary>Release any resources used.</summary>
        protected override void dispose()
        {
            dispose(ref m_blobBottomCounts);
            dispose(ref m_blobTopCounts);
            dispose(ref m_blobForwardMap);
            dispose(ref m_blobBackwardMap);
            dispose(ref m_blobBuffer);
            base.dispose();
        }

        /// <summary>Derivative layers add all internal blobs to the 'col' provided.</summary>
        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;

            col.Add(m_blobBottomCounts);
            col.Add(m_blobTopCounts);
            col.Add(m_blobForwardMap);
            col.Add(m_blobBackwardMap);
            col.Add(m_blobBuffer);
        }

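        // Illustrative example of the permute() helper below (values assumed): with
        // transpose_param.dim = { 0, 2, 3, 1 } and a bottom shape of { 2, 3, 4, 5 },
        // permute() returns the top shape { 2, 4, 5, 3 }.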
        private List<int> permute(List<int> rg)
        {
            List<int> rgNew = new List<int>();

            m_log.CHECK_EQ(rg.Count, m_param.transpose_param.dim.Count, "The index array must be the same size as the transpose_param.dim array.");

            for (int i = 0; i < rg.Count; i++)
            {
                int nAxis = m_param.transpose_param.dim[i];
                int nDim = rg[nAxis];
                rgNew.Add(nDim);
            }

            return rgNew;
        }

        /// <summary>Returns the exact number of required bottom (input) Blobs: input.</summary>
        public override int ExactNumBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>Returns the exact number of required top (output) Blobs: transpose.</summary>
        public override int ExactNumTopBlobs
        {
            get { return 1; }
        }

        /// <summary>Setup the layer.</summary>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_log.CHECK(colBottom[0] != colTop[0], "The Transpose layer does not support in-place computation.");
        }

        /// <summary>Reshape the bottom (input) and top (output) blobs.</summary>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_rgShape.Clear();
            for (int i = 0; i < colBottom[0].num_axes; i++)
            {
                m_rgShape.Add(colBottom[0].shape(i));
            }

            while (m_rgShape.Count > m_param.transpose_param.dim.Count && m_rgShape[m_rgShape.Count - 1] == 1)
            {
                m_rgShape.RemoveAt(m_rgShape.Count - 1);
            }

            m_log.CHECK_GT(m_rgShape.Count, 0, "The dimension of the transposed blob should be greater than zero.");
            m_log.CHECK_LE(m_rgShape.Count, Blob<T>.MAX_BLOB_AXES, "The dimension of the transposed blob should be less than " + Blob<T>.MAX_BLOB_AXES.ToString() + ".");
            m_log.CHECK_EQ(m_rgShape.Count, m_param.transpose_param.dim.Count, "The dimension of the bottom blob must equal the number of dimensions in the transpose parameter.");

            if (colBottom[0].num_axes != m_rgShape.Count)
                colBottom[0].Reshape(m_rgShape);

            List<int> rgTopShape = permute(m_rgShape);
            colTop[0].Reshape(rgTopShape);

            int nNumAxes = m_param.transpose_param.dim.Count;
            List<int> rgShape = new List<int>();

            rgShape.Add(nNumAxes);

            shareLayerBlob(m_blobBottomCounts, rgShape);
            m_blobBottomCounts.Reshape(rgShape);
            shareLayerBlob(m_blobTopCounts, rgShape);
            m_blobTopCounts.Reshape(rgShape);

            List<float> rgBottomCounts = new List<float>();
            List<float> rgTopCounts = new List<float>();

            for (int i = 1; i < nNumAxes; i++)
            {
                rgBottomCounts.Add(colBottom[0].count(i));
                rgTopCounts.Add(colTop[0].count(i));
            }

            rgBottomCounts.Add(1);
            rgTopCounts.Add(1);

            m_blobBottomCounts.mutable_cpu_data = convert(rgBottomCounts.ToArray());
            m_blobTopCounts.mutable_cpu_data = convert(rgTopCounts.ToArray());

            shareLayerBlob(m_blobForwardMap, rgShape);
            m_blobForwardMap.Reshape(rgShape);
            shareLayerBlob(m_blobBackwardMap, rgShape);
            m_blobBackwardMap.Reshape(rgShape);

            List<float> rgForwardMap = new List<float>();
            List<float> rgBackwardMap = Utility.Create<float>(nNumAxes, 0);

            for (int i = 0; i < nNumAxes; i++)
            {
                int nDim = m_param.transpose_param.dim[i];
                rgForwardMap.Add(nDim);
                rgBackwardMap[nDim] = i;
            }
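            // For example (values assumed), transpose_param.dim = { 0, 2, 3, 1 } yields the
            // forward map { 0, 2, 3, 1 } and its inverse permutation { 0, 3, 1, 2 } as the
            // backward map used to route gradients back to the bottom layout.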

            m_blobForwardMap.mutable_cpu_data = convert(rgForwardMap.ToArray());
            m_blobBackwardMap.mutable_cpu_data = convert(rgBackwardMap.ToArray());

            rgShape.Clear();
            rgShape.Add(colBottom[0].count() * nNumAxes);

            shareLayerBlob(m_blobBuffer, rgShape);
            m_blobBuffer.Reshape(rgShape);
        }

        /// <summary>Forward computation: permutes the bottom data into the top blob.</summary>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_cuda.transpose(colBottom[0].count(), colBottom[0].gpu_data, colTop[0].mutable_gpu_data, m_blobBottomCounts.gpu_data, m_blobTopCounts.gpu_data, m_blobForwardMap.gpu_data, colBottom[0].shape().Count, m_blobBuffer.mutable_gpu_data);
        }

        /// <summary>Computes the error gradient w.r.t. the transpose inputs.</summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (!rgbPropagateDown[0])
                return;

            m_bForceReshape = true;
            Reshape(colBottom, colTop);

            m_cuda.transpose(colBottom[0].count(), colTop[0].gpu_diff, colBottom[0].mutable_gpu_diff, m_blobTopCounts.gpu_data, m_blobBottomCounts.gpu_data, m_blobBackwardMap.gpu_data, colBottom[0].shape().Count, m_blobBuffer.mutable_gpu_data);
        }
    }
}
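The following is a minimal usage sketch and is not part of TransposeLayer.cs; it assumes the general MyCaffe plumbing (CudaDnn, Log, CancelEvent, the Layer<T>.Create factory and the public Setup/Forward entry points), and the device id, blob shape and axis order below are illustrative values only.

using System;
using System.Collections.Generic;
using MyCaffe.basecode;   // Log, CancelEvent
using MyCaffe.common;     // CudaDnn, Blob, BlobCollection
using MyCaffe.layers;     // Layer<T>
using MyCaffe.param;      // LayerParameter

internal static class TransposeLayerExample
{
    public static void Run()
    {
        // Sketch only: device id, shape and axis order are assumptions for illustration.
        CudaDnn<float> cuda = new CudaDnn<float>(0);
        Log log = new Log("transpose_example");

        LayerParameter p = new LayerParameter(LayerParameter.LayerType.TRANSPOSE);
        p.transpose_param.dim = new List<int>() { 0, 2, 1, 3 };   // swap axes 1 and 2

        Blob<float> bottom = new Blob<float>(cuda, log);
        bottom.Reshape(2, 3, 4, 5);                               // bottom: 2 x 3 x 4 x 5
        Blob<float> top = new Blob<float>(cuda, log);

        BlobCollection<float> colBottom = new BlobCollection<float>();
        colBottom.Add(bottom);
        BlobCollection<float> colTop = new BlobCollection<float>();
        colTop.Add(top);

        Layer<float> layer = Layer<float>.Create(cuda, log, p, new CancelEvent());
        layer.Setup(colBottom, colTop);     // reshapes top to 2 x 4 x 3 x 5
        layer.Forward(colBottom, colTop);   // copies the bottom data, permuted, into top

        layer.Dispose();
        bottom.Dispose();
        top.Dispose();
        cuda.Dispose();
    }
}

With dim = { 0, 2, 1, 3 } the channel and height axes are exchanged, so a 2 x 3 x 4 x 5 input produces a 2 x 4 x 3 x 5 output; running a second TRANSPOSE layer with the inverse permutation restores the original layout.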
The Log class provides general output in text form.
Definition: Log.cs:13
void CHECK(bool b, string str)
Test a flag for true.
Definition: Log.cs:227
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
Definition: Log.cs:239
void CHECK_GT(double df1, double df2, string str)
Test whether one number is greater than another.
Definition: Log.cs:299
void CHECK_LE(double df1, double df2, string str)
Test whether one number is less than or equal to another.
Definition: Log.cs:263
The Utility class provides general utility functions.
Definition: Utility.cs:35
static List< int > Create(int nCount, int nStart, int nInc)
Create a new List and fill it with values starting with start and incrementing by inc.
Definition: Utility.cs:721
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
Definition: Blob.cs:25
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
Definition: Blob.cs:1487
T[] mutable_cpu_data
Get data from the GPU and bring it over to the host, or set data from the host and send it over to the GPU.
Definition: Blob.cs:1461
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape(List<int> rgShape) overload instead.
Definition: Blob.cs:442
const int MAX_BLOB_AXES
Defines the maximum number of Axes supported by the Blob.
Definition: Blob.cs:55
string Name
Get/set the name of the Blob.
Definition: Blob.cs:2184
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
Definition: Blob.cs:1479
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
An interface for the units of computation which can be composed into a Net.
Definition: Layer.cs:31
Log m_log
Specifies the Log for output.
Definition: Layer.cs:43
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
Definition: Layer.cs:47
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
Definition: Layer.cs:535
bool shareLayerBlob(Blob< T > b, List< int > rgMinShape)
Attempts to share a Layer Blob if another parameter Blob with the same name and acceptable size is found.
Definition: Layer.cs:1170
BlobCollection< T > m_colInternalBlobs
Specifies internal blobs used by the layer.
Definition: Layer.cs:59
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
Definition: Layer.cs:39
LayerParameter.LayerType m_type
Specifies the Layer type.
Definition: Layer.cs:35
The TransposeLayer performs a permute and transpose operation similar to numpy.transpose.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: transpose.
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void dispose()
Release any resources used.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the transpose inputs.
TransposeLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The TransposeLayer constructor.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
TransposeParameter transpose_param
Returns the parameter set when initialized with LayerType.TRANSPOSE
LayerType
Specifies the layer type.
List< int > dim
Specifies the dimensions to transpose.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers.beta namespace contains all beta stage layers.
Definition: LayerFactory.cs:9
The MyCaffe.param.beta parameters are used by the MyCaffe.layer.beta layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.
Definition: Annotation.cs:12