MyCaffe 1.12.2.41
Deep learning software for Windows C# programmers.
ReshapeTemporalLayer.cs
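The ReshapeTemporalLayer reshapes temporal data for the Temporal Fusion Transformer (TFT): in the BEFORE mode it stacks the time axis into the batch axis so that a single selection module can process every time step at once, and in the AFTER mode it restores the (samples, time steps, features) layout. As a rough configuration sketch (the cuda, log and evtCancel objects are assumptions supplied by the host program, not part of this file):

    // Hypothetical host-side configuration of the layer.
    LayerParameter p = new LayerParameter(LayerParameter.LayerType.RESHAPE_TEMPORAL);
    p.reshape_temporal_param.mode = ReshapeTemporalParameter.MODE.BEFORE;
    p.reshape_temporal_param.forced_repeat_count = -1; // -1 = repeat along the bottom's own temporal axis
    Layer<float> layer = Layer<float>.Create(cuda, log, p, evtCancel);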
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Net.Http.Headers;
using System.Reflection;
using System.Text;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.param;
using MyCaffe.param.tft;

namespace MyCaffe.layers.tft
{
    /// <summary>
    /// The ReshapeTemporalLayer implements the temporal reshaping used by the Variable Selection
    /// Network of the TFT: the BEFORE mode stacks the time axis into the batch axis, and the
    /// AFTER mode restores the (samples, time steps, features) layout.
    /// </summary>
    public class ReshapeTemporalLayer<T> : Layer<T>
    {
        ReshapeTemporalParameter.MODE m_mode;
        int m_nNumSamples;
        int m_nNumRepeatCount;
        int m_nForcedRepeatCount;
        List<int> m_rgShape = new List<int>(4);
        Dictionary<string, List<int>> m_rgShapes = new Dictionary<string, List<int>>();
        Blob<T> m_blobTimeDistributedContext;
        Blob<T> m_blobWork;

        /// <summary>
        /// The constructor.
        /// </summary>
        public ReshapeTemporalLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.RESHAPE_TEMPORAL;
        }

        /// <summary>
        /// Releases all GPU and host resources used by the Layer.
        /// </summary>
        protected override void dispose()
        {
            dispose(ref m_blobTimeDistributedContext);
            dispose(ref m_blobWork);
        }

        /// <summary>
        /// Derivative layers should add all internal blobs to the 'col' provided.
        /// </summary>
        protected override void setup_internal_blobs(BlobCollection<T> col)
        {
            if (col.Count > 0)
                return;

            if (m_blobTimeDistributedContext != null)
                col.Add(m_blobTimeDistributedContext);
        }

        /// <summary>
        /// Returns the min number of required bottom (input) Blobs: temporal_rep
        /// </summary>
        public override int MinBottomBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the max number of required bottom (input) Blobs: temporal_rep, static_selection
        /// </summary>
        public override int MaxBottomBlobs
        {
            get { return 2; }
        }

        /// <summary>
        /// Returns the min number of required top (output) Blobs: temporal_selection_output
        /// </summary>
        public override int MinTopBlobs
        {
            get { return 1; }
        }

        /// <summary>
        /// Returns the max number of required top (output) Blobs: temporal_selection_output plus the
        /// optional clip and weight outputs.
        /// </summary>
        public override int MaxTopBlobs
        {
            get { return 3; }
        }

        /// <summary>
        /// Replicate the bottom blob along the time axis, producing a top blob that holds
        /// 'nTimeSteps' copies of the bottom data.
        /// </summary>
        private void replicate_along_time_fwd(Blob<T> bBtm, Blob<T> bTop, int nTimeSteps, bool bTemporalRepeat, bool bReshapeOnly = false)
        {
            m_rgShape.Clear();

            if (bTemporalRepeat)
            {
                // Shape the top as (num_samples, time_steps, inner) and copy the bottom
                // into each time step of each sample.
                m_rgShape.Add(m_nNumSamples);
                m_rgShape.Add(nTimeSteps);
                m_rgShape.Add(bBtm.shape(1));
                bTop.Reshape(m_rgShape);

                if (!bReshapeOnly)
                {
                    int nInnerNum = bBtm.count(1);
                    for (int i = 0; i < nTimeSteps; i++)
                    {
                        m_cuda.channel_copy(bBtm.count(), m_nNumSamples, 1, nTimeSteps, nInnerNum, i, bTop.mutable_gpu_data, bBtm.gpu_data, DIR.BWD);
                    }
                }
            }
            else
            {
                // Shape the top as (time_steps, num_samples, inner) and copy the entire
                // bottom into each time step.
                m_rgShape.Add(nTimeSteps);
                m_rgShape.Add(m_nNumSamples);
                m_rgShape.Add(bBtm.shape(1));
                bTop.Reshape(m_rgShape);

                if (!bReshapeOnly)
                {
                    int nInnerNum = bBtm.count(1);
                    for (int i = 0; i < nTimeSteps; i++)
                    {
                        m_cuda.channel_copy(bBtm.count(), 1, 1, nTimeSteps, m_nNumSamples * nInnerNum, i, bTop.mutable_gpu_data, bBtm.gpu_data, DIR.BWD);
                    }
                }
            }
        }

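        // Shape sketch for replicate_along_time_fwd (illustrative only): with m_nNumSamples = 2
        // and nTimeSteps = 3, a (2, 4) bottom becomes a (2, 3, 4) top when bTemporalRepeat is
        // true (top[n, t, :] = btm[n, :]), or a (3, 2, 4) top when false (top[t, n, :] = btm[n, :]);
        // in both cases every time step holds a copy of the same bottom data.
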
        /// <summary>
        /// Sum the top gradients over the time axis back into the bottom gradient, reversing
        /// the forward replication.
        /// </summary>
        private void replicate_along_time_bwd(Blob<T> bBtm, Blob<T> bTop, int nTimeSteps, bool bTemporalRepeat)
        {
            int nInnerNum = bBtm.count(1);

            if (bTemporalRepeat)
                m_cuda.channel_sum(bTop.count(), m_nNumSamples, nTimeSteps, nInnerNum, bTop.gpu_diff, bBtm.mutable_gpu_diff, true);
            else
                m_cuda.channel_sum(bTop.count(), 1, nTimeSteps, m_nNumSamples * nInnerNum, bTop.gpu_diff, bBtm.mutable_gpu_diff, true);

            //m_cuda.channel_copy(bBtm.count(), m_nNumSamples, 1, nTimeSteps, nInnerNum, 0, bTop.mutable_gpu_diff, bBtm.gpu_diff, DIR.FWD);

            //for (int i = 1; i < nTimeSteps; i++)
            //{
            //    m_cuda.channel_add(bBtm.count(), m_nNumSamples, 1, nTimeSteps, nInnerNum, i, bTop.mutable_gpu_diff, bBtm.mutable_gpu_diff, DIR.FWD);
            //}
        }

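        // Gradient sketch (illustrative): the forward pass writes the same bottom values x into
        // each of the T time slots, y_t = x for t = 0..T-1, so the backward pass accumulates
        // dL/dx = sum over t of dL/dy_t, which is exactly what channel_sum computes above.
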
        /// <summary>
        /// Stack the time steps along the batch axis, reshaping (N, T, ...) to (N*T, ...).
        /// </summary>
        private void stack_time_steps_along_batch_fwd(Blob<T> bBtm, Blob<T> bTop, bool bResizeOnly = false)
        {
            if (!bResizeOnly)
                bTop.CopyFrom(bBtm, false, true);

            m_rgShape.Clear();
            m_rgShape.Add(bBtm.shape(0) * bBtm.shape(1));
            m_rgShape.Add(bBtm.count(2));
            bTop.Reshape(m_rgShape);
        }

        /// <summary>
        /// Copy the top gradient back to the bottom, reversing the batch/time stacking (the
        /// stacking is a pure reshape, so the gradient passes through unchanged).
        /// </summary>
        private void stack_time_steps_along_batch_bwd(Blob<T> bBtm, Blob<T> bTop)
        {
            bBtm.CopyFrom(bTop, true, false, 0, true);
        }

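        // Shape sketch (illustrative): a (2, 3, 4) bottom stacks to a (6, 4) top, letting one
        // module treat every (sample, time step) pair as an independent batch row; the backward
        // pass is a plain diff copy because stacking changes only the shape, not the data.
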
        /// <summary>
        /// Setup the layer.
        /// </summary>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            m_mode = m_param.reshape_temporal_param.mode;

            if (m_mode == param.tft.ReshapeTemporalParameter.MODE.BEFORE)
            {
                m_nNumSamples = colBottom[0].num;
                m_nForcedRepeatCount = m_param.reshape_temporal_param.forced_repeat_count;

                if (m_nForcedRepeatCount >= 0)
                    m_nNumRepeatCount = m_nForcedRepeatCount;
                else
                    m_nNumRepeatCount = colBottom[0].shape(1);

                // replicate the selection signal along time
                if (colBottom.Count > 1)
                {
                    m_blobWork = new Blob<T>(m_cuda, m_log);
                    m_blobWork.Name = "work";

                    if (m_nNumRepeatCount > 0)
                    {
                        m_blobTimeDistributedContext = new Blob<T>(m_cuda, m_log);
                        m_blobTimeDistributedContext.Name = m_param.name + ".tdctx";
                        replicate_along_time_fwd(colBottom[1], m_blobTimeDistributedContext, m_nNumRepeatCount, m_nForcedRepeatCount < 0, true);
                        stack_time_steps_along_batch_fwd(m_blobTimeDistributedContext, colTop[1], true);
                    }
                    else
                    {
                        stack_time_steps_along_batch_fwd(colBottom[1], colTop[1], true);
                    }

                    colTop[1].SetParameter("num_samples", m_nNumSamples);
                    colTop[1].SetParameter("num_temporal_steps", m_nNumRepeatCount);
                    colTop[1].SetParameter("forced_temporal_steps", m_nForcedRepeatCount);
                }

                // Apply the same selection module on all timesteps by stacking the time dimension with the batch dimension
                stack_time_steps_along_batch_fwd(colBottom[0], colTop[0], true);
                colTop[0].SetParameter("num_samples", m_nNumSamples);
                colTop[0].SetParameter("num_temporal_steps", colBottom[0].shape(1));
            }
            else
            {
                m_nNumSamples = (int)colBottom[0].GetParameter("num_samples");
                int nTemporalSteps = (int)colBottom[0].GetParameter("num_temporal_steps");

                int nCount = colBottom[0].count();
                int nDim = m_nNumSamples * nTemporalSteps;
                m_rgShape.Clear();
                m_rgShape.Add(m_nNumSamples);
                m_rgShape.Add(nTemporalSteps);
                m_rgShape.Add(nCount / nDim);
                colTop[0].Reshape(m_rgShape);

                int nIdx = 1;
                if (m_param.reshape_temporal_param.enable_clip_output)
                {
                    m_log.CHECK_GT(colTop.Count, nIdx, "There must be at least " + (nIdx + 1).ToString() + " tops for the enable clip output!");
                    m_rgShape.Clear();
                    m_rgShape.Add(m_nNumSamples);
                    m_rgShape.Add(nTemporalSteps);
                    colTop[nIdx].Reshape(m_rgShape);
                    nIdx++;
                }

                if (colBottom.Count > 1)
                {
                    m_nNumRepeatCount = (int)colBottom[1].GetParameter("num_temporal_steps");
                    m_nForcedRepeatCount = (int)colBottom[1].GetParameter("forced_temporal_steps");

                    if (m_param.reshape_temporal_param.enable_weight_output)
                    {
                        m_log.CHECK_GT(colTop.Count, nIdx, "There must be at least " + (nIdx + 1).ToString() + " tops for the enable weight output!");
                        nCount = colBottom[1].count();
                        m_rgShape.Clear();

                        if (m_nForcedRepeatCount >= 0)
                        {
                            m_rgShape.Add(m_nNumSamples);
                            m_rgShape.Add(nCount / nDim);
                        }
                        else
                        {
                            m_rgShape.Add(m_nNumSamples);
                            if (m_nNumRepeatCount > 0)
                                m_rgShape.Add(m_nNumRepeatCount);
                            m_rgShape.Add(nCount / nDim);
                        }

                        colTop[nIdx].Reshape(m_rgShape);
                    }
                }
            }

            setup_internal_blobs(m_colInternalBlobs);
        }

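        // Note (handshake between instances): the BEFORE instance records "num_samples",
        // "num_temporal_steps" and "forced_temporal_steps" on its tops via SetParameter, and a
        // downstream AFTER instance reads them back from its bottoms via GetParameter to undo
        // the batch/time stacking.
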
        /// <summary>
        /// Reshape the top (output) blobs.
        /// </summary>
        public override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            if (m_mode == ReshapeTemporalParameter.MODE.BEFORE)
            {
                // replicate the selection signal along time
                if (colBottom.Count > 1)
                {
                    if (m_nNumRepeatCount > 0)
                    {
                        replicate_along_time_fwd(colBottom[1], m_blobTimeDistributedContext, m_nNumRepeatCount, m_nForcedRepeatCount < 0, true);
                        m_blobWork.ReshapeLike(colBottom[1]);
                        stack_time_steps_along_batch_fwd(m_blobTimeDistributedContext, colTop[1], true);
                    }
                    else
                    {
                        stack_time_steps_along_batch_fwd(colBottom[1], colTop[1], true);
                    }
                }

                // Apply the same selection module on all timesteps by stacking the time dimension with the batch dimension
                stack_time_steps_along_batch_fwd(colBottom[0], colTop[0], true);
            }
            else
            {
                int nTemporalSteps = (int)colBottom[0].GetParameter("num_temporal_steps");
                int nCount = colBottom[0].count();
                int nDim = m_nNumSamples * nTemporalSteps;
                m_rgShape.Clear();
                m_rgShape.Add(m_nNumSamples);
                m_rgShape.Add(nTemporalSteps);
                m_rgShape.Add(nCount / nDim);
                colTop[0].Reshape(m_rgShape);

                int nIdx = 1;
                if (m_param.reshape_temporal_param.enable_clip_output)
                {
                    m_log.CHECK_GT(colTop.Count, nIdx, "There must be at least " + (nIdx + 1).ToString() + " tops for the enable clip output!");
                    m_rgShape.Clear();
                    m_rgShape.Add(m_nNumSamples);
                    m_rgShape.Add(nTemporalSteps);
                    colTop[nIdx].Reshape(m_rgShape);
                    nIdx++;
                }

                if (colBottom.Count > 1)
                {
                    m_nNumRepeatCount = (int)colBottom[1].GetParameter("num_temporal_steps");
                    m_nForcedRepeatCount = (int)colBottom[1].GetParameter("forced_temporal_steps");

                    if (m_param.reshape_temporal_param.enable_weight_output)
                    {
                        m_log.CHECK_GT(colTop.Count, nIdx, "There must be at least " + (nIdx + 1).ToString() + " tops for the enable weight output!");
                        nCount = colBottom[1].count();
                        m_rgShape.Clear();

                        if (m_nForcedRepeatCount >= 0)
                        {
                            m_rgShape.Add(m_nNumSamples);
                            m_rgShape.Add(nCount / nDim);
                        }
                        else
                        {
                            m_rgShape.Add(m_nNumSamples);
                            if (m_nNumRepeatCount > 0)
                                m_rgShape.Add(m_nNumRepeatCount);
                            m_rgShape.Add(nCount / nDim);
                        }

                        colTop[nIdx].Reshape(m_rgShape);
                    }
                }
            }
        }

        /// <summary>
        /// Forward computation.
        /// </summary>
        protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            if (m_mode == ReshapeTemporalParameter.MODE.BEFORE)
            {
                if (colBottom.Count > 1)
                {
                    // replicate the selection signal along time
                    if (m_nNumRepeatCount > 0)
                    {
                        replicate_along_time_fwd(colBottom[1], m_blobTimeDistributedContext, m_nNumRepeatCount, m_nForcedRepeatCount < 0);
                        stack_time_steps_along_batch_fwd(m_blobTimeDistributedContext, colTop[1]);
                    }
                    else
                    {
                        stack_time_steps_along_batch_fwd(colBottom[1], colTop[1]);
                    }
                }

                // Apply the same selection module on all timesteps by stacking the time dimension with the batch dimension
                stack_time_steps_along_batch_fwd(colBottom[0], colTop[0]);
            }
            else
            {
                colTop[0].CopyFrom(colBottom[0], false, false, 0, true);

                int nIdx = 1;
                if (m_param.reshape_temporal_param.enable_clip_output)
                {
                    colTop[nIdx].SetData(1);
                    nIdx++;
                }

                if (m_param.reshape_temporal_param.enable_weight_output)
                {
                    colTop[nIdx].CopyFrom(colBottom[1], false, false, 0, true);
                }
            }
        }

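        // Note: in the AFTER mode the tops are ordered as data, then the optional clip output
        // (filled with ones when enable_clip_output is set), then the optional weight output
        // (a copy of bottom(1) when enable_weight_output is set).
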
        /// <summary>
        /// Computes the error gradient w.r.t. the temporal reshaping inputs.
        /// </summary>
        protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
        {
            if (m_mode == ReshapeTemporalParameter.MODE.BEFORE)
            {
                // Apply the same selection module on all timesteps by stacking the time dimension with the batch dimension
                stack_time_steps_along_batch_bwd(colBottom[0], colTop[0]);

                // replicate the static selection signal along time
                if (colBottom.Count > 1)
                {
                    if (m_nNumRepeatCount > 0)
                    {
                        stack_time_steps_along_batch_bwd(m_blobTimeDistributedContext, colTop[1]);
                        replicate_along_time_bwd(colBottom[1], m_blobTimeDistributedContext, m_nNumRepeatCount, m_nForcedRepeatCount < 0);
                    }
                    else
                    {
                        stack_time_steps_along_batch_bwd(colBottom[1], colTop[1]);
                    }
                }
            }
            else
            {
                colBottom[0].CopyFrom(colTop[0], true, false, 0, true);
                int nIdx = 1;

                if (m_param.reshape_temporal_param.enable_clip_output)
                    nIdx++;

                if (colBottom.Count > 1)
                    colBottom[1].CopyFrom(colTop[nIdx], true, false, 0, true);
            }
        }
    }
}
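
The following end-to-end sketch shows how the layer might be driven in the BEFORE mode; the shapes (8 samples, 30 time steps, 64 features) and all variable names are illustrative assumptions, not taken from this file:

    // Bottom blobs: temporal_rep (8, 30, 64) and static_selection (8, 64).
    Blob<float> blobTemporal = new Blob<float>(cuda, log);
    blobTemporal.Reshape(new List<int>() { 8, 30, 64 });
    Blob<float> blobStatic = new Blob<float>(cuda, log);
    blobStatic.Reshape(new List<int>() { 8, 64 });

    BlobCollection<float> colBtm = new BlobCollection<float>();
    colBtm.Add(blobTemporal);
    colBtm.Add(blobStatic);

    BlobCollection<float> colTop = new BlobCollection<float>();
    colTop.Add(new Blob<float>(cuda, log)); // stacked temporal rep -> (8*30, 64)
    colTop.Add(new Blob<float>(cuda, log)); // time-distributed static selection -> (8*30, 64)

    layer.Setup(colBtm, colTop);
    layer.Forward(colBtm, colTop);
    // colTop[0] now has shape (240, 64), so one module can treat each (sample, time step)
    // pair as an independent batch row; a matching AFTER-mode layer restores (8, 30, 64).
    layer.Dispose();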