MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
DataSequenceLayer.cs
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;

namespace MyCaffe.layers.beta
{
    /// <summary>
    /// The DataSequenceLayer caches inputs by label and then outputs data item tuplets
    /// containing an 'anchor' along with its positive and negative matches.
    /// </summary>
    /// <typeparam name="T">Specifies the base type 'float' or 'double'.</typeparam>
    public class DataSequenceLayer<T> : Layer<T>
    {
        int m_nK;                               // Number of negative matches (k) output per anchor.
        int m_nCacheSize;                       // Number of data items cached per label.
        bool m_bOutputLabels = false;           // When true, a label blob is output as the last top.
        Blob<T> m_blobLabeledDataCache = null;  // Cache of input data items, grouped by label.
        int m_nLabelStart = 0;                  // First label value of the dataset.
        int m_nLabelCount = 0;                  // Total label count (0 = discover from the first batch).
        bool m_bBalanceMatches = false;         // When true, copy_sequence balances the matches selected.
        long m_hCacheCursors = 0;               // Host buffer holding two cache cursors per label.
        long m_hWorkDataHost = 0;               // Host work buffer sized to the batch of labels.

        /// <summary>
        /// The DataSequenceLayer constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter, initialized with LayerType.DATA_SEQUENCE.</param>
        public DataSequenceLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_param = p;
            m_type = LayerParameter.LayerType.DATA_SEQUENCE;
            m_nK = m_param.data_sequence_param.k;
            m_nCacheSize = m_param.data_sequence_param.cache_size;
            m_bOutputLabels = m_param.data_sequence_param.output_labels;
            m_nLabelCount = m_param.data_sequence_param.label_count;
            m_nLabelStart = m_param.data_sequence_param.label_start;
            m_bBalanceMatches = m_param.data_sequence_param.balance_matches;
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            if (m_blobLabeledDataCache != null)
            {
                m_blobLabeledDataCache.Dispose();
                m_blobLabeledDataCache = null;
            }

            if (m_hCacheCursors != 0)
            {
                m_cuda.FreeHostBuffer(m_hCacheCursors);
                m_hCacheCursors = 0;
                m_nLabelCount = 0;
            }

            if (m_hWorkDataHost != 0)
            {
                m_cuda.FreeHostBuffer(m_hWorkDataHost);
                m_hWorkDataHost = 0;
            }

            base.dispose();
        }

        /// <summary>
        /// Returns the exact number of required bottom (input) Blobs: data, label.
        /// </summary>
        public override int ExactNumBottomBlobs
        {
            get { return 2; } // data, label
        }

        /// <summary>
        /// Returns the minimum number of required top (output) Blobs: the anchor, the
        /// positive match (when k > 0), the negative matches, and optionally the labels.
        /// </summary>
        public override int MinTopBlobs
        {
            get { return m_nK + 2 + ((m_bOutputLabels) ? 1 : 0); } // anchor, positive (when k > 0), negatives, +1 for labels
        }

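        // Worked example (values assumed for illustration, not from the original source):
        // with k = 2 and output_labels = true, MinTopBlobs = 2 + 2 + 1 = 5, mapping to
        // top[0] = anchor, top[1] = positive, top[2..3] = negatives, top[4] = labels.
        // With k = 0 and output_labels = false, MinTopBlobs = 2: the anchor and its single match.
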
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs: data, label.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            // Disable back-propagation to all inputs of this layer.
            for (int i = 0; i < colBottom.Count; i++)
            {
                m_param.propagate_down.Add(false);
            }
        }

        /// <summary>
        /// Reshape the bottom (input) and top (output) blobs.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs: data, label.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            colTop[0].ReshapeLike(colBottom[0]); // anchor
            colTop[1].ReshapeLike(colBottom[0]); // positive match (or the single match when k = 0)

            for (int k = 0; k < m_nK; k++)
            {
                colTop[2 + k].ReshapeLike(colBottom[0]); // negative matches
            }

            if (m_bOutputLabels)
            {
                // The label top holds one label per tuplet item, so its channel dimension
                // equals the tuplet size of 2 + k.
                int nLabelDim = 2 + m_nK;
                colTop[2 + m_nK].Reshape(colBottom[0].num, nLabelDim, 1, 1);
            }
        }

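        // Shape example (input sizes assumed for illustration): given bottom[0] of shape
        // (32, 3, 28, 28) and k = 2, each of the four data tops is reshaped to (32, 3, 28, 28)
        // and, when output_labels is true, the label top becomes (32, 4, 1, 1).
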
        /// <summary>
        /// During the forward pass, each input data item is cached by label and then sequencing
        /// is performed on the cached data to produce the output tuplets.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs: data, label.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            Blob<T> data = colBottom[0];
            Blob<T> labels = colBottom[1];

            if (m_blobLabeledDataCache == null)
            {
                List<int> rgLabels = new List<int>();

                // Use dynamic label discovery - requires that all labels of the dataset are in the first batch.
                if (m_nLabelCount == 0)
                {
                    float[] rgfLabels = convertF(labels.update_cpu_data());

                    foreach (float fLabel in rgfLabels)
                    {
                        int nLabel = (int)fLabel;
                        if (!rgLabels.Contains(nLabel))
                            rgLabels.Add(nLabel);
                    }

                    rgLabels.Sort();

                    m_nLabelCount = rgLabels.Count;
                    m_nLabelStart = rgLabels.Min();
                }
                else
                {
                    for (int i = 0; i < m_nLabelCount; i++)
                    {
                        rgLabels.Add(m_nLabelStart + i);
                    }
                }

                // Allocate a cache of 'cache_size' items per label, plus the host-side cursor
                // and work buffers used by the CUDA batch and sequence copies.
                int nNum = rgLabels.Count * m_nCacheSize;
                m_blobLabeledDataCache = new Blob<T>(m_cuda, m_log, nNum, colBottom[0].channels, colBottom[0].height, colBottom[0].width);
                m_blobLabeledDataCache.SetData(0);
                m_hCacheCursors = m_cuda.AllocHostBuffer(rgLabels.Count * 2);
                m_hWorkDataHost = m_cuda.AllocHostBuffer(labels.count());
            }
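            // Illustrative sizing (values assumed for concreteness, not from the source):
            // with 10 labels and cache_size = 256, the cache blob holds 10 * 256 = 2,560 items,
            // the cursor buffer holds 10 * 2 = 20 host values, and the work buffer holds one
            // entry per label in the batch.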

            m_log.CHECK_EQ(data.num, labels.count(), "The label count does not match the batch size!");

            // Cache the current batch of data items by label.
            m_cuda.copy_batch(data.count(), data.num, data.count(1), data.gpu_data, labels.gpu_data, m_blobLabeledDataCache.count(), m_blobLabeledDataCache.mutable_gpu_data, m_blobLabeledDataCache.mutable_gpu_diff, m_nLabelStart, m_nLabelCount, m_nCacheSize, m_hCacheCursors, m_hWorkDataHost);

            // Collect the top blob handles and counts, then copy the sequenced
            // anchor/match tuplets from the cache into the tops.
            int nK = m_nK;
            List<long> rgTop = new List<long>();
            List<int> rgTopCount = new List<int>();

            for (int i = 0; i < colTop.Count; i++)
            {
                rgTop.Add(colTop[i].mutable_gpu_data);
                rgTopCount.Add(colTop[i].count());
            }

            m_cuda.copy_sequence(nK, data.num, data.count(1), data.gpu_data, labels.gpu_data, m_blobLabeledDataCache.count(), m_blobLabeledDataCache.gpu_data, m_nLabelStart, m_nLabelCount, m_nCacheSize, m_hCacheCursors, m_bOutputLabels, rgTop, rgTopCount, m_hWorkDataHost, m_bBalanceMatches);
        }

        /// <summary>
        /// Not implemented - the DataSequenceLayer does not perform a backward pass.
        /// </summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (rgbPropagateDown[0])
                throw new NotImplementedException();
        }
    }
}
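
As a usage illustration only - a minimal sketch, not part of DataSequenceLayer.cs - the fragment below assumes an initialized CudaDnn<float> connection named cuda, a Log named log, and bottom/top BlobCollections whose data and label blobs are already populated; all parameter values are placeholders.

// Hypothetical usage sketch - names and values below are assumptions for illustration.
LayerParameter p = new LayerParameter(LayerParameter.LayerType.DATA_SEQUENCE);
p.data_sequence_param.k = 2;                 // request two negative matches per anchor
p.data_sequence_param.cache_size = 256;      // cache up to 256 items per label
p.data_sequence_param.output_labels = true;  // emit a label blob as the last top

DataSequenceLayer<float> layer = new DataSequenceLayer<float>(cuda, log, p);

layer.LayerSetUp(colBottom, colTop);  // disables back-propagation to the inputs
layer.Reshape(colBottom, colTop);     // sizes the 2 + k data tops and the label top

// Each forward call caches the batch by label and fills the tops with
// anchor/positive/negative tuplets (plus their labels).
layer.Forward(colBottom, colTop);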