MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
LayerFactory.cs
1using MyCaffe.basecode;
2using MyCaffe.common;
3using MyCaffe.param;
4
9{
14 {
25 {
// Map the requested layer type to a concrete layer instance using the
// 'double' base type.  Data-driven layers (MODEL_DATA) additionally receive
// the in-memory database and the cancel event.  Returns null for any type
// this factory does not implement, allowing the caller to try other
// layer extension factories.
switch (p.type)
{
    case LayerParameter.LayerType.ACCURACY_DECODE:
        return new AccuracyDecodeLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.ACCURACY_ENCODING:
        return new AccuracyEncodingLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.ATTENTION:
        return new AttentionLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.CONVOLUTION_OCTAVE:
        return new ConvolutionOctaveLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.COPY:
        return new CopyLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.DATA_SEQUENCE:
        return new DataSequenceLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.DECODE:
        return new DecodeLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.GATHER:
        return new GatherLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.GLOBRES_NORM:
        return new GlobResNormLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.INTERP:
        return new InterpLayer<double>(cuda, log, p);

    // BUGFIX: the KNN case label was missing here, which orphaned the
    // KnnLayer construction and caused KNN requests to hit 'default'
    // (returning null).  Restored to match the float version of this factory.
    case LayerParameter.LayerType.KNN:
        return new KnnLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.LSTM_ATTENTION:
        return new LSTMAttentionLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.MEAN_ERROR_LOSS:
        return new MeanErrorLossLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.MERGE:
        return new MergeLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.MISH:
        return new MishLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.NORMALIZATION1:
        return new Normalization1Layer<double>(cuda, log, p);

    case LayerParameter.LayerType.MODEL_DATA:
        return new ModelDataLayer<double>(cuda, log, p, db, evtCancel);

    case LayerParameter.LayerType.SERF:
        return new SerfLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.TEXT_DATA:
        return new TextDataLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.TRANSPOSE:
        return new TransposeLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.TRIPLET_LOSS:
        return new TripletLossLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.SQUEEZE:
        return new SqueezeLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.UNSQUEEZE:
        return new UnsqueezeLayer<double>(cuda, log, p);

    case LayerParameter.LayerType.UNPOOLING1:
        return new UnPoolingLayer1<double>(cuda, log, p);

    case LayerParameter.LayerType.UNPOOLING:
        return new UnPoolingLayer<double>(cuda, log, p);

    default:
        // Not a layer implemented by this extension DLL.
        return null;
}
106 }
107
118 {
// Resolve the layer type in 'p' to a concrete layer instance using the
// 'float' base type.  Cases are listed alphabetically; MODEL_DATA is the
// only data-backed layer and also takes the database and cancel event.
// An unrecognized type yields null so the caller can consult other
// layer extension factories.
switch (p.type)
{
    case LayerParameter.LayerType.ACCURACY_DECODE:
        return new AccuracyDecodeLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.ACCURACY_ENCODING:
        return new AccuracyEncodingLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.ATTENTION:
        return new AttentionLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.CONVOLUTION_OCTAVE:
        return new ConvolutionOctaveLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.COPY:
        return new CopyLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.DATA_SEQUENCE:
        return new DataSequenceLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.DECODE:
        return new DecodeLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.GATHER:
        return new GatherLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.GLOBRES_NORM:
        return new GlobResNormLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.INTERP:
        return new InterpLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.KNN:
        return new KnnLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.LSTM_ATTENTION:
        return new LSTMAttentionLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.MEAN_ERROR_LOSS:
        return new MeanErrorLossLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.MERGE:
        return new MergeLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.MISH:
        return new MishLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.MODEL_DATA:
        return new ModelDataLayer<float>(cuda, log, p, db, evtCancel);

    case LayerParameter.LayerType.NORMALIZATION1:
        return new Normalization1Layer<float>(cuda, log, p);

    case LayerParameter.LayerType.SERF:
        return new SerfLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.SQUEEZE:
        return new SqueezeLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.TEXT_DATA:
        return new TextDataLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.TRANSPOSE:
        return new TransposeLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.TRIPLET_LOSS:
        return new TripletLossLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.UNPOOLING:
        return new UnPoolingLayer<float>(cuda, log, p);

    case LayerParameter.LayerType.UNPOOLING1:
        return new UnPoolingLayer1<float>(cuda, log, p);

    case LayerParameter.LayerType.UNSQUEEZE:
        return new UnsqueezeLayer<float>(cuda, log, p);

    default:
        // Unknown to this factory; let the caller try elsewhere.
        return null;
}
199 }
200 }
201}
The CancelEvent provides an extension to the manual cancel event that allows for overriding the manua...
Definition: CancelEvent.cs:17
The Log class provides general output in text form.
Definition: Log.cs:13
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
[DEPRECATED] The AttentionLayer provides focus for LSTM based encoder/decoder models.
The CopyLayer copies the src bottom to the dst bottom. The layer has no output.
Definition: CopyLayer.cs:16
The LSTMAttentionLayer adds attention to the long-short term memory layer and is used in encoder/deco...
An interface for the units of computation which can be composed into a Net.
Definition: Layer.cs:31
The AccuracyDecodeLayer compares the labels output by the DecodeLayer with the expected labels output...
The AccuracyEncodingLayer computes the classification accuracy for an encoding used in a classificati...
The ConvolutionOctaveLayer processes high and low frequency portions of images using convolution.
DataSequence Layer - this caches inputs by label and then outputs data item tuplets that include an '...
The DecodeLayer decodes the label of a classification for an encoding produced by a Siamese Network o...
Definition: DecodeLayer.cs:25
The GatherLayer extracts (gathers) data from specified indices along a given axis from the input and ...
Definition: GatherLayer.cs:19
The GRNLayer performs an L2 normalization over the input data.
The InterpLayer changes the spatial resolution by bi-linear interpolation.
Definition: InterpLayer.cs:23
The LayerFactory is responsible for creating all layers implemented in the MyCaffe....
Definition: LayerFactory.cs:14
Layer< double > CreateDouble(CudaDnn< double > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
Create the layers when using the double base type.
Definition: LayerFactory.cs:24
Layer< float > CreateSingle(CudaDnn< float > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
Create the layers when using the float base type.
The MeanErrorLossLayer computes losses based on various different Mean Error methods as shown below....
The MergeLayer merges two bottom blobs with a specified copy pattern and outputs a single blob result...
Definition: MergeLayer.cs:20
The MishLayer provides a novel activation function that tends to work better than ReLU....
Definition: MishLayer.cs:26
The ModelDataLayer loads data from RawImageResults table for an encoder/decoder type model.
The Normalization1Layer performs an L2 normalization over the input data. This layer is initialized w...
The SerfLayer provides a novel activation function that tends to work better than ReLU.
Definition: SerfLayer.cs:22
The SqueezeLayer performs a squeeze operation where all single dimensions are removed.
Definition: SqueezeLayer.cs:17
The TextDataLayer loads data from text data files for an encoder/decoder type model....
The TransposeLayer performs a permute and transpose operation similar to numpy.transpose.
TripletLoss Layer - this is the triplet loss layer used to calculate the triplet loss and gradients u...
The UnsqueezeLayer performs an unsqueeze operation where a single dimension is inserted at each index...
Specifies the base parameter for all layers.
LayerType type
Specifies the type of this LayerParameter.
LayerType
Specifies the layer type.
The ILayerCreator interface is implemented by each MyCaffe.layers.x layer extension dll and is used t...
Definition: Interfaces.cs:19
The IXDatabaseBase interface defines the general interface to the in-memory database.
Definition: Interfaces.cs:444
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.layers.beta namespace contains all beta stage layers.
Definition: LayerFactory.cs:9
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...
Definition: Annotation.cs:12