using Google.Protobuf.Collections;
using System.Collections.Generic;
using System.Threading.Tasks;
string m_strReport = "";
string m_strOriginalPath = null;
bool m_bEnableBackward = false;
double? m_dfWtScaleMin = null;
double? m_dfWtScaleMax = null;
List<string> m_rgstrIgnoreLayerNames = new List<string>();
int m_nReshapeCount = 0;
int m_nFlattenCount = 0;
int m_nDropoutCount = 0;
List<string> m_rgstrTensorsAlreadyAdded = new List<string>();
Dictionary<Blob<T>, string> m_rgBlobNameFixups = new Dictionary<Blob<T>, string>();
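// Field summary (descriptive comments added for clarity, inferred from how the fields are used below):
//   m_strReport               - accumulates a human-readable report of the conversion steps and errors.
//   m_strOriginalPath         - working directory for side files; defaults to the assembly directory.
//   m_bEnableBackward         - set when learnable layers (e.g. Conv, Gemm, MatMul) are imported.
//   m_dfWtScaleMin/Max        - optional range used by scale() to re-scale imported weights.
//   m_rgstrIgnoreLayerNames   - imported layers whose names contain any of these strings are skipped.
//   m_rgstrTensorsAlreadyAdded, m_rgBlobNameFixups - bookkeeping used while exporting tensors and blob names.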
InitializeComponent();
m_strOriginalPath = AssemblyDirectory;
InitializeComponent();
m_strOriginalPath = AssemblyDirectory;
private static string AssemblyDirectory
string codeBase = Assembly.GetExecutingAssembly().CodeBase;
UriBuilder uri = new UriBuilder(codeBase);
string path = Uri.UnescapeDataString(uri.Path);
return Path.GetDirectoryName(path);
m_dfWtScaleMax = dfMax;
m_dfWtScaleMin = dfMin;
get { return m_rgstrIgnoreLayerNames; }
set { m_rgstrIgnoreLayerNames = value; }
get { return m_strReport; }
m_strOriginalPath = Path.GetDirectoryName(strOutputFile);
ModelProto protoOnnx = ConvertMyCaffeToOnnx(cuda, log, data, nOpSetVersion, bUseRawData, dstDataType);
PersistOnnx persist = new PersistOnnx();
persist.Save(protoOnnx, strOutputFile);
ModelProto protoOnnx = convertToOnnx(log, net, nOpSetVersion, bUseRawData, dstDataType);
return convertToOnnx(ctrl.Log, net, nOpSetVersion, bUseRawData, dstDataType);
m_strOriginalPath = Path.GetDirectoryName(strOnnxFile);
PersistOnnx persist = new PersistOnnx();
persist.Save(proto, strOnnxFile);
m_strOriginalPath = Path.GetDirectoryName(strOnnxFile);
PersistOnnx persist = new PersistOnnx();
ModelProto proto = persist.Load(strOnnxFile);
Tuple<NetParameter, BlobCollection<T>> data = convertToMyCaffe(cuda, log, onnxModel, bFixupNeuronNodes, dsTraining);
if (!bIncludeLastLayerWeights && data.Item2.Count > 0)
    data.Item2.RemoveAt(data.Item2.Count - 1);
byte[] rgWeights = persist.SaveWeights(data.Item2, false);
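// Hedged usage sketch (not part of the original source): the Convert* methods above are normally
// driven from application code roughly as follows.  The class name 'MyCaffeConversionControl<T>'
// and the exact overload signatures are assumptions made for illustration only.
//
//   MyCaffeConversionControl<float> convert = new MyCaffeConversionControl<float>();
//   // Export: write a MyCaffe model description + weights ('data') out to an ONNX file.
//   convert.ConvertMyCaffeToOnnx(cuda, log, data, "model.onnx");
//   // Import: load an ONNX file and get back the MyCaffe model description and learnable weights
//   // (overload shape assumed by symmetry with the export path shown above).
//   var modelData = convert.ConvertOnnxToMyCaffe(cuda, log, "model.onnx");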
private ModelProto convertToOnnx(Log log, Net<T> net, int nOpSetVersion = 9, bool bUseRawData = true, OnnxDefinitions.DataType dstDataType = OnnxDefinitions.DataType.FLOAT)
ModelProto proto = new ModelProto();
Assembly assembly = Assembly.GetExecutingAssembly();
FileVersionInfo ver = FileVersionInfo.GetVersionInfo(assembly.Location);
proto.ProducerName = "MyCaffe Converter for ONNX";
proto.ProducerVersion = ver.FileVersion;
proto.Domain = "org.mycaffe";
proto.ModelVersion = 1;
OperatorSetIdProto opset = new OperatorSetIdProto();
opset.Version = nOpSetVersion;
proto.OpsetImport.Add(opset);
StringStringEntryProto author = new StringStringEntryProto();
author.Key = "model_author";
author.Value = "SignalPop LLC";
proto.MetadataProps.Add(author);
StringStringEntryProto license = new StringStringEntryProto();
license.Key = "model_license";
license.Value = "https://github.com/MyCaffe/MyCaffe/blob/master/LICENSE";
proto.MetadataProps.Add(license);
proto.Graph = new GraphProto();
proto.Graph.Name = netParam.name;
addNodes(log, proto.Graph.Node, net.layers, proto.Graph.Initializer);
foreach (KeyValuePair<Blob<T>, string> kv in m_rgBlobNameFixups)
    kv.Key.Name = kv.Value;
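// Note: the block above fills in the standard ONNX model header (producer name/version, domain,
// model version, opset import and the model_author/model_license metadata entries) before the
// graph itself is populated by addNodes(); the final loop restores any blob names that were
// temporarily changed while exporting.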
private void addValueInfo(RepeatedField<ValueInfoProto> rg, BlobCollection<T> blobs)
foreach (Blob<T> blob in blobs)
ValueInfoProto val = new ValueInfoProto();
val.Name = blob.Name;
TypeProto type = new TypeProto();
type.TensorType = new TypeProto.Types.Tensor();
type.TensorType.ElemType = (int)OnnxDefinitions.DataType.FLOAT;
type.TensorType.Shape = new TensorShapeProto();
foreach (int nShape in blob.shape())
TensorShapeProto.Types.Dimension dim = new TensorShapeProto.Types.Dimension();
dim.DimValue = nShape;
type.TensorType.Shape.Dim.Add(dim);
private string removeWs(string str, char ch)
foreach (char ch1 in str)
if (char.IsWhiteSpace(ch1))
private void addShapeTensor(RepeatedField<TensorProto> rg, string strName, List<int> rgShape)
TensorProto tensor = new TensorProto();
tensor.Name = strName;
tensor.DataType = (int)OnnxDefinitions.DataType.INT64;
tensor.Dims.Add(rgShape.Count);
for (int i = 0; i < rgShape.Count; i++)
    tensor.Int64Data.Add(rgShape[i]);
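// Example (illustrative only): addShapeTensor(rg, "reshape1", new List<int>() { 1, 256 }) adds an
// INT64 tensor named "reshape1" with Dims = [2] and Int64Data = { 1, 256 }; Reshape nodes reference
// such tensors as their second ('shape') input.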
private string addTensor(RepeatedField<TensorProto> rg, Blob<T> blob, string strExtra, bool bUseRawData = true, OnnxDefinitions.DataType dstDataType = OnnxDefinitions.DataType.FLOAT)
TensorProto tensor = new TensorProto();
tensor.Name = removeWs(blob.Name, '_') + strExtra;
tensor.DataType = (int)dstDataType;
List<int> rgShape = new List<int>();
foreach (int nShape in blob.shape())
while (rgShape.Count > 1 && rgShape[rgShape.Count - 1] == 1)
    rgShape.RemoveAt(rgShape.Count - 1);
for (int i = 0; i < rgShape.Count; i++)
    tensor.Dims.Add(rgShape[i]);
if (dstDataType == OnnxDefinitions.DataType.FLOAT)
if (typeof(T) == typeof(float))
float[] rgfData = Utility.ConvertVecF<T>(rgData);
byte[] rgByte = new byte[rgfData.Length * sizeof(float)];
Buffer.BlockCopy(rgfData, 0, rgByte, 0, rgByte.Length);
tensor.RawData = ByteString.CopyFrom(rgByte);
float[] rgfData2 = rgfData.Select(p => (float)p).ToArray();
byte[] rgByte = new byte[rgfData2.Length * sizeof(float)];
Buffer.BlockCopy(rgfData2, 0, rgByte, 0, rgByte.Length);
tensor.RawData = ByteString.CopyFrom(rgByte);
else if (dstDataType == OnnxDefinitions.DataType.DOUBLE)
if (typeof(T) == typeof(float))
float[] rgfData = Utility.ConvertVecF<T>(rgData);
double[] rgfData2 = rgfData.Select(p => (double)p).ToArray();
byte[] rgByte = new byte[rgfData2.Length * sizeof(double)];
Buffer.BlockCopy(rgfData2, 0, rgByte, 0, rgByte.Length);
tensor.RawData = ByteString.CopyFrom(rgByte);
byte[] rgByte = new byte[rgfData.Length * sizeof(double)];
Buffer.BlockCopy(rgfData, 0, rgByte, 0, rgByte.Length);
tensor.RawData = ByteString.CopyFrom(rgByte);
throw new Exception("Currently only the FLOAT and DOUBLE data types are supported when exporting.");
if (dstDataType == OnnxDefinitions.DataType.FLOAT)
if (typeof(T) == typeof(float))
float[] rgfData = Utility.ConvertVecF<T>(rgData);
foreach (float val in rgfData)
    tensor.FloatData.Add(val);
foreach (double val in rgfData)
    tensor.FloatData.Add(Convert.ToSingle(val));
else if (dstDataType == OnnxDefinitions.DataType.DOUBLE)
if (typeof(T) == typeof(float))
float[] rgfData = Utility.ConvertVecF<T>(rgData);
foreach (float val in rgfData)
    tensor.DoubleData.Add(Convert.ToDouble(val));
foreach (double val in rgfData)
    tensor.DoubleData.Add(val);
throw new Exception("Currently only the FLOAT and DOUBLE data types are supported when exporting.");
m_rgstrTensorsAlreadyAdded.Add(blob.Name);
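// Illustrative helper (not in the original source): a minimal sketch of the same float[] -> raw
// little-endian byte[] encoding used by the bUseRawData FLOAT branch of addTensor() above.
private static byte[] encodeFloatsAsRawDataSketch(float[] rgf)
{
    byte[] rgBytes = new byte[rgf.Length * sizeof(float)];    // 4 bytes per float
    Buffer.BlockCopy(rgf, 0, rgBytes, 0, rgBytes.Length);     // copy the IEEE-754 bits verbatim
    return rgBytes;                                           // suitable for TensorProto.RawData
}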
private void addTensors(RepeatedField<TensorProto> rg, BlobCollection<T> blobs, bool bUseRawData = true, OnnxDefinitions.DataType dstDataType = OnnxDefinitions.DataType.FLOAT)
foreach (Blob<T> blob in blobs)
if (m_rgstrTensorsAlreadyAdded.Contains(blob.Name))
addTensor(rg, blob, "", bUseRawData, dstDataType);
private void addNodes(Log log, RepeatedField<NodeProto> rg, List<Layer<T>> rgLayers, RepeatedField<TensorProto> rgTensors)
Dictionary<string, List<string>> rgTopCounts = new Dictionary<string, List<string>>();
foreach (Layer<T> layer in rgLayers)
NodeProto node = new NodeProto();
string strBtm1 = strBottom;
if (rgTopCounts.ContainsKey(strBottom))
    strBtm1 = rgTopCounts[strBottom].Last();
node.Input.Add(strBottom);
if (!rgTopCounts.ContainsKey(strTop))
    rgTopCounts.Add(strTop, new List<string>() { strTop });
rgTopCounts[strTop].Add(strTop + "_" + rgTopCounts[strTop].Count.ToString());
string strTop1 = rgTopCounts[strTop].Last();
node.Output.Add(strTop1);
node.OpType = OnnxDefinitions.OPERATORS.Abs.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ArgMin.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ArgMax.ToString();
node.OpType = OnnxDefinitions.OPERATORS.BatchNormalization.ToString();
blobScale.Name = node.Name + "_scale";
string strScale = addTensor(rgTensors, blobScale, "");
node.Input.Add(strScale);
blobBias.Name = node.Name + "_bias";
string strBias = addTensor(rgTensors, blobBias, "");
node.Input.Add(strBias);
node.OpType = OnnxDefinitions.OPERATORS.Clip.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Concat.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Constant.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Conv.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Dropout.ToString();
if (strTraining != null)
    node.Input.Add(strTraining);
node.OpType = OnnxDefinitions.OPERATORS.Add.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Sub.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Mul.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Div.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Max.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Min.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Elu.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Exp.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Flatten.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Gather.ToString();
NodeProto node1 = new NodeProto();
node1.OpType = OnnxDefinitions.OPERATORS.Flatten.ToString();
node1.Name = "flatten" + m_nFlattenCount.ToString();
node1.Input.Add(node.Input[0]);
node1.Output.Add(node1.Name);
flatten_param.axis = 1;
addAttributes(node1.Attribute, flatten_param);
node.Input[0] = node1.Name;
node.OpType = OnnxDefinitions.OPERATORS.Gemm.ToString();
node.OpType = OnnxDefinitions.OPERATORS.LRN.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Log.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Acos.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Acosh.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Cos.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Cosh.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Asin.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Asinh.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Sin.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Sinh.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Atan.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Atanh.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Tan.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Tanh.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Ceil.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Floor.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Neg.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Sign.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Sqrt.ToString();
node.OpType = OnnxDefinitions.OPERATORS.GlobalAveragePool.ToString();
node.OpType = OnnxDefinitions.OPERATORS.GlobalMaxPool.ToString();
throw new Exception("Currently global STOCHASTIC pooling is not supported for ONNX conversion.");
node.OpType = OnnxDefinitions.OPERATORS.AveragePool.ToString();
node.OpType = OnnxDefinitions.OPERATORS.MaxPool.ToString();
throw new Exception("Currently STOCHASTIC pooling is not supported for ONNX conversion.");
node.OpType = OnnxDefinitions.OPERATORS.PRelu.ToString();
colParams = layer.blobs;
node.OpType = OnnxDefinitions.OPERATORS.Pow.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ReduceMax.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ReduceMean.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ReduceMin.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ReduceSum.ToString();
node.OpType = OnnxDefinitions.OPERATORS.ReduceSumSquare.ToString();
node.OpType = OnnxDefinitions.OPERATORS.LeakyRelu.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Relu.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Reshape.ToString();
string strName = "reshape" + m_nReshapeCount.ToString();
node.Input.Add(strName);
node.OpType = OnnxDefinitions.OPERATORS.Softmax.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Split.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Squeeze.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Unsqueeze.ToString();
node.OpType = OnnxDefinitions.OPERATORS.Transpose.ToString();
foreach (Blob<T> blob in colParams)
    node.Input.Add(removeWs(blob.Name, '_'));
m_lastType = layer.type;
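// Note: after the operator-specific mapping above, each layer's learnable blobs (colParams) are
// appended to the node's inputs using their whitespace-stripped names, and m_lastType records the
// layer type just processed for use on the next iteration.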
AttributeProto attrib = new AttributeProto();
attrib.Name = "epsilon";
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib.F = (float)p.eps;
attrib = new AttributeProto();
attrib.Name = "momentum";
attrib.Type = AttributeProto.Types.AttributeType.Float;
private void addAttributes(RepeatedField<AttributeProto> rgA, ClipParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib.F = (float)p.min;
attrib = new AttributeProto();
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib.F = (float)p.max;
private void addAttributes(RepeatedField<AttributeProto> rgA, ConcatParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "axis";
attrib.Type = AttributeProto.Types.AttributeType.Int;
private void addAttributes(RepeatedField<AttributeProto> rgA, ConstantParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "value";
attrib.Type = AttributeProto.Types.AttributeType.Tensor;
attrib.T = new TensorProto();
attrib.T.DataType = (int)OnnxDefinitions.DataType.FLOAT;
for (int i = 0; i < p.values_f.Count; i++)
    attrib.T.FloatData.Add(p.values_f[i]);
attrib.T.Dims.Add(nDim);
AttributeProto attrib = new AttributeProto();
attrib.Name = "kernel_shape";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "strides";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "pads";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "dilations";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "dilations";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "group";
attrib.Type = AttributeProto.Types.AttributeType.Int;
private string addAttributes(RepeatedField<AttributeProto> rgA, RepeatedField<TensorProto> rgTensors, DropoutParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "ratio";
attrib.Type = AttributeProto.Types.AttributeType.Float;
private void addAttributes(RepeatedField<AttributeProto> rgA, FlattenParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "axis";
attrib.Type = AttributeProto.Types.AttributeType.Int;
private void addAttributes(RepeatedField<AttributeProto> rgA, GatherParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "axis";
attrib.Type = AttributeProto.Types.AttributeType.Int;
AttributeProto attrib = new AttributeProto();
attrib.Name = "alpha";
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib = new AttributeProto();
attrib.Name = "beta";
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib = new AttributeProto();
attrib.Name = "transA";
attrib.Type = AttributeProto.Types.AttributeType.Int;
attrib = new AttributeProto();
attrib.Name = "transB";
attrib.Type = AttributeProto.Types.AttributeType.Int;
private void addAttributes(RepeatedField<AttributeProto> rgA, LRNParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "alpha";
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib.F = (float)p.alpha;
attrib = new AttributeProto();
attrib.Name = "beta";
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib.F = (float)p.beta;
attrib = new AttributeProto();
attrib.Name = "bias";
attrib.Type = AttributeProto.Types.AttributeType.Float;
attrib.F = (float)p.k;
attrib = new AttributeProto();
attrib.Name = "size";
attrib.Type = AttributeProto.Types.AttributeType.Int;
private void addAttributes(RepeatedField<AttributeProto> rgA, LogParameter p)
private void addAttributes(RepeatedField<AttributeProto> rgA, PoolingParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "kernel_shape";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "strides";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib = new AttributeProto();
attrib.Name = "pads";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
private void addAttributes(RepeatedField<AttributeProto> rgA, EluParameter p)
private void addAttributes(RepeatedField<AttributeProto> rgA, ExpParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "axes";
attrib.Type = AttributeProto.Types.AttributeType.Ints;
attrib.Ints.Add(p.axis);
private void addAttributes(RepeatedField<AttributeProto> rgA, ReLUParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "alpha";
attrib.Type = AttributeProto.Types.AttributeType.Float;
private void addAttributes(RepeatedField<AttributeProto> rgA, RepeatedField<TensorProto> rgTensors, string strName, ReshapeParameter p, bool bRemoveTrailingOnes)
List<int> rgShape = new List<int>();
for (int i = 0; i < p.axis; i++)
for (int i = 0; i < p.shape.dim.Count; i++)
if (bRemoveTrailingOnes)
while (rgShape.Count > 2 && rgShape[rgShape.Count - 1] == 1)
    rgShape.RemoveAt(rgShape.Count - 1);
if (rgShape[0] == -1)
addShapeTensor(rgTensors, strName, rgShape);
private void addAttributes(RepeatedField<AttributeProto> rgA, SoftmaxParameter p)
AttributeProto attrib = new AttributeProto();
attrib.Name = "axis";
attrib.Type = AttributeProto.Types.AttributeType.Int;
foreach (int nDim in p.dim)
AttributeProto attrib = new AttributeProto();
attrib.Name = "dim";
attrib.Type = AttributeProto.Types.AttributeType.Int;
private string clean(string str)
foreach (char ch in str)
if (!char.IsWhiteSpace(ch))
private Tuple<NetParameter, BlobCollection<T>> convertToMyCaffe(CudaDnn<T> cuda, Log log, ModelProto proto, bool bFixupNeuronNodes, DatasetDescriptor dsTraining = null)
OnnxDefinitions onnx = new OnnxDefinitions();
netParam.name = clean(proto.Graph.Name);
Tuple<List<string>, List<string>> rgInputs = addInputs(proto.Graph.Input, netParam, false);
addTensors(proto.Graph.Initializer, colTensors, cuda, log);
colLearnableBlobs = addLayers(proto.Graph.Node, proto.Graph.Input, netParam, colTensors, onnx, rgInputs.Item1, cuda, log, false);
addInputs(proto.Graph.Input, netParam, true, rgInputs.Item1, rgInputs.Item2);
NetParameter netParamFixed = fixupModel(netParam, colLearnableBlobs, rgInputs.Item2);
if (bFixupNeuronNodes)
    netParamFixed = fixupModelNeuronNodes(netParamFixed);
if (dsTraining != null)
    netParamFixed = fixupModelForTraining(netParamFixed, dsTraining);
netParamFixed = linkEmptyBottoms(netParamFixed);
netParamFixed = removeLayersWithOrphanedBottoms(netParamFixed);
return new Tuple<NetParameter, BlobCollection<T>>(netParamFixed, colLearnableBlobs);
catch (Exception excpt)
m_strReport += "ERROR: " + excpt.Message + Environment.NewLine;
for (int i = 1; i < net.layer.Count; i++)
net.layer[i].bottom.Count == 0 &&
net.layer[i - 1].top.Count == 1)
net.layer[i].bottom.Add(net.layer[i - 1].top[0]);
List<int> rgRemoveIdx = new List<int>();
Dictionary<string, Tuple<int, int>> rgTopToLayerIdx = new Dictionary<string, Tuple<int, int>>();
for (int i = 0; i < net.layer.Count; i++)
for (int j = 0; j < net.layer[i].top.Count; j++)
if (!rgTopToLayerIdx.ContainsKey(net.layer[i].top[j]))
    rgTopToLayerIdx.Add(net.layer[i].top[j], new Tuple<int, int>(i, j));
for (int i = 1; i < net.layer.Count; i++)
Dictionary<string, Tuple<int, int>> rgBtmToParentTop = new Dictionary<string, Tuple<int, int>>();
bool bMissingBtmFound = false;
foreach (string strBtm in net.layer[i].bottom)
if (!rgTopToLayerIdx.ContainsKey(strBtm))
bMissingBtmFound = true;
rgBtmToParentTop.Add(strBtm, rgTopToLayerIdx[strBtm]);
if (!bMissingBtmFound && net.layer[i].bottom.Count >= net.layer[i].expected_bottom.Count)
    rgBtmToParentTop.Clear();
else if (!rgRemoveIdx.Contains(i))
foreach (KeyValuePair<string, Tuple<int, int>> kvTopInParent in rgBtmToParentTop)
if (net.layer[i].top.Count > 0)
    net.layer[kvTopInParent.Value.Item1].top[kvTopInParent.Value.Item2] = net.layer[i].top[0];
for (int i = rgRemoveIdx.Count - 1; i >= 0; i--)
net.layer.RemoveAt(rgRemoveIdx[i]);
string strName = (netParam.input.Count > 0) ? netParam.input[0] : "data";
dataLayerTrain.transform_param.scale = 1.0;
dataLayerTrain.data_param.batch_size = 16;
dataLayerTrain.top.Add(strName);
dataLayerTrain.top.Add("label");
dataLayerTest.top.Add(strName);
dataLayerTest.top.Add("label");
if (netParam.input.Count == 0)
if (netParam.layer[0].bottom.Count > 0)
    netParam.layer[0].bottom[0] = strName;
netParam.layer[0].bottom.Add(strName);
m_strReport += "Removed inputs " + Utility.ToString<string>(netParam.input) + Environment.NewLine;
netParam.input.Clear();
m_strReport += "Added DATA layer '" + dataLayerTest.name + "' with phase TEST..." + Environment.NewLine;
netParam.layer.Insert(0, dataLayerTest);
m_strReport += "Added DATA layer '" + dataLayerTest.name + "' with phase TRAIN..." + Environment.NewLine;
netParam.layer.Insert(0, dataLayerTrain);
List<string> rgstrLossBottom;
List<string> rgstrAccuracyBottom;
m_strReport += "Removing last layer SOFTMAX..." + Environment.NewLine;
netParam.layer.Remove(lastLayer);
rgstrAccuracyBottom = Utility.Clone<string>(lastLayer.bottom);
rgstrLossBottom = Utility.Clone<string>(lastLayer.top);
rgstrAccuracyBottom = Utility.Clone<string>(lastLayer.top);
loss.top.Add("loss");
loss.bottom = rgstrLossBottom;
loss.bottom.Add(dataLayerTrain.top[1]);
m_strReport += "Added new last layer SOFTMAXWITH_LOSS '" + loss.name + "'..." + Environment.NewLine;
netParam.layer.Add(loss);
accuracy.top.Add("accuracy");
accuracy.bottom = rgstrAccuracyBottom;
accuracy.bottom.Add(dataLayerTest.top[1]);
m_strReport += "Added new last layer ACCURACY '" + accuracy.name + "'..." + Environment.NewLine;
netParam.layer.Add(accuracy);
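// fixupModelForTraining (above) rewrites an inference-only import for training: it inserts DATA
// layers for the TRAIN and TEST phases, points the first layer's bottom at 'data', clears the raw
// model inputs, replaces a trailing SOFTMAX with a SOFTMAXWITH_LOSS layer and adds an ACCURACY layer.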
private bool replaceTop(LayerParameter p, string strTopToReplace, string strNewTop)
for (int i = 0; i < p.top.Count; i++)
if (p.top[i] == strTopToReplace)
p.top[i] = strNewTop;
private bool replaceBtm(LayerParameter p, string strBtmToReplace, string strNewBtm)
for (int i = 0; i < p.bottom.Count; i++)
if (p.bottom[i] == strBtmToReplace)
if (isNeuron(layer.type))
string strBtm = layer.bottom[0];
if (layer.top.Count == 0)
    layer.top.Add(strBtm);
string strTop = layer.top[0];
List<LayerParameter> rgLayerNext = findLayersWithBtm(netParam, strTop);
replaceBtm(layerNext, strTop, strBtm);
int nDataInputIdx = 0;
for (int i = 0; i < p.input.Count; i++)
if (p.input[i].Contains("data"))
List<LayerParameter> rgLayer1 = findLayersWithBtm(p, p.input[nDataInputIdx]);
replaceBtm(layer1, p.input[nDataInputIdx], "data");
m_strReport += "Changed layer '" + layer1.name + " (" + layer1.type.ToString() + ")' input from '" + p.input[nDataInputIdx] + "' to 'data'";
m_strReport += "Changed data input[" + nDataInputIdx.ToString() + "] from '" + p.input[nDataInputIdx] + "' to 'data'";
p.input[nDataInputIdx] = "data";
List<string> rgInputs = Utility.Clone<string>(p.input);
foreach (string strBtm in layer1.bottom)
    rgInputs.Remove(strBtm);
if (rgInputs.Count == 0)
foreach (string strOrphanInput in rgInputs)
    p.input.Remove(strOrphanInput);
LayerDataCollection rgBtmOrphans = new LayerDataCollection(LayerData.TYPE.BTM);
for (int i = p.layer.Count - 1; i >= 0; i--)
rgBtmOrphans.Add(layer.bottom, i, layer);
rgBtmOrphans.Remove(layer.top);
rgBtmOrphans.Remove(p.input);
LayerDataCollection rgTopOrphans = new LayerDataCollection(LayerData.TYPE.TOP);
for (int i = 0; i < p.layer.Count; i++)
if (i < p.layer.Count - 1)
    rgTopOrphans.Add(layer.top, i, layer);
rgTopOrphans.Remove(layer.bottom);
if (rgTopOrphans.Count > 0)
m_strReport += "[Found '" + rgTopOrphans.Count.ToString() + " Top Orphans]" + Environment.NewLine;
foreach (LayerData top in rgTopOrphans)
m_strReport += " " + top.Name + " found in layer " + top.Layer.ToString() + Environment.NewLine;
for (int i = 0; i < p.layer.Count; i++)
m_strReport += rgTopOrphans.FixupTops(i, layer);
if (rgBtmOrphans.Count > 0)
m_strReport += "[Found '" + rgBtmOrphans.Count.ToString() + " Bottom Orphans]" + Environment.NewLine;
foreach (LayerData btm in rgBtmOrphans)
m_strReport += " " + btm.Name + " found in layer " + btm.Layer.ToString() + Environment.NewLine;
foreach (LayerData item in rgBtmOrphans)
if (item.Layer.bottom.Count > 1 && rgstrInvalidInput.Contains(item.Layer.bottom[1]))
foreach (string strBtm1 in item.Layer.bottom)
    p.input.Remove(strBtm1);
string strBtm = item.Layer.bottom[0];
string strTop = (item.Layer.top.Count > 0) ? item.Layer.top[0] : null;
List<LayerParameter> rgLayerNext = findLayersWithBtm(p, strTop);
m_strReport += "Removed RESHAPE layer..." + Environment.NewLine;
p.layer.Remove(item.Layer);
if (layerPrev != null && rgLayerNext.Count > 0)
m_strReport += "Removed blob '" + blob.Name + "' with shape '" + blob.shape_string + "'..." + Environment.NewLine;
string strTop1 = layerPrev.top[0];
for (int i = 0; i < layerNext.bottom.Count; i++)
if (layerNext.bottom[i] == strTop)
layerNext.bottom[i] = strTop1;
m_strReport += "connected joining layers with '" + strTop1 + "'...";
if (layer.top.Contains(strTop))
private List<LayerParameter> findLayersWithBtm(NetParameter net, string strBtm)
List<LayerParameter> rgLayers = new List<LayerParameter>();
if (layer.bottom.Contains(strBtm))
    rgLayers.Add(layer);
private Tuple<List<string>, List<string>> addInputs(RepeatedField<ValueInfoProto> rg, NetParameter p, bool bAdd, List<string> rgstrAvailable = null, List<string> rgstrInvalid = null)
List<string> rgstrInput = new List<string>();
List<string> rgstrInvalid1 = new List<string>();
foreach (ValueInfoProto val in rg)
if ((rgstrAvailable == null || rgstrAvailable.Contains(val.Name)) && (rgstrInvalid == null || !rgstrInvalid.Contains(val.Name)))
if (val.Type.TensorType == null)
    throw new Exception("Currently only Tensor input types are supported.");
if (val.Type.TensorType.ElemType != (int)OnnxDefinitions.DataType.FLOAT &&
    val.Type.TensorType.ElemType != (int)OnnxDefinitions.DataType.DOUBLE)
    rgstrInvalid1.Add(val.Name);
rgstrInput.Add(convertWs(val.Name));
p.input.Add(convertWs(val.Name));
List<int> rgShape = new List<int>();
TypeProto type = val.Type;
if (type.TensorType == null)
    throw new Exception("Currently only Tensor input types are supported.");
TensorShapeProto shape = type.TensorType.Shape;
foreach (TensorShapeProto.Types.Dimension dim in shape.Dim)
    rgShape.Add((int)dim.DimValue);
m_strReport += "Adding input '" + val.Name + " with shape " + rgShape.ToString() + Environment.NewLine;
return new Tuple<List<string>, List<string>>(rgstrInput, rgstrInvalid1);
foreach (TensorProto tensor in rg)
List<int> rgShape = new List<int>();
foreach (long lDim in tensor.Dims)
    rgShape.Add((int)lDim);
blob.Name = convertWs(tensor.Name);
if (typeof(T) == typeof(float))
m_strReport += "Adding tensor '" + blob.Name + "' " + blob.shape_string + Environment.NewLine;
float[] rgData = null;
if (tensor.DataType == (int)OnnxDefinitions.DataType.FLOAT)
if (tensor.FloatData.Count > 0)
rgData = new float[tensor.FloatData.Count];
for (int i = 0; i < tensor.FloatData.Count; i++)
    rgData[i] = tensor.FloatData[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(float);
rgData = new float[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData, 0, rgRaw.Length);
else if (tensor.DataType == (int)OnnxDefinitions.DataType.DOUBLE)
if (tensor.DoubleData.Count > 0)
rgData = new float[tensor.DoubleData.Count];
for (int i = 0; i < tensor.DoubleData.Count; i++)
    rgData[i] = (float)tensor.DoubleData[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(double);
double[] rgData2 = new double[nLen];
rgData = new float[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData2, 0, rgRaw.Length);
Array.Copy(rgData2, rgData, nLen);
else if (tensor.DataType == (int)OnnxDefinitions.DataType.INT32)
if (tensor.Int32Data.Count > 0)
rgData = new float[tensor.Int32Data.Count];
for (int i = 0; i < tensor.Int32Data.Count; i++)
    rgData[i] = (float)tensor.Int32Data[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(int);
int[] rgData2 = new int[nLen];
rgData = new float[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData2, 0, rgRaw.Length);
Array.Copy(rgData2, rgData, nLen);
else if (tensor.DataType == (int)OnnxDefinitions.DataType.INT64)
if (tensor.Int64Data.Count > 0)
rgData = new float[tensor.Int64Data.Count];
for (int i = 0; i < tensor.Int64Data.Count; i++)
    rgData[i] = (float)tensor.Int64Data[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(long);
long[] rgData2 = new long[nLen];
rgData = new float[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData2, 0, rgRaw.Length);
Array.Copy(rgData2, rgData, nLen);
throw new Exception("Currently only the 'DataType.FLOAT' and 'DataType.DOUBLE' are supported for conversions to MyCaffe.");
double[] rgData = null;
if (tensor.DataType == (int)OnnxDefinitions.DataType.FLOAT)
if (tensor.FloatData.Count > 0)
rgData = new double[tensor.FloatData.Count];
for (int i = 0; i < tensor.FloatData.Count; i++)
    rgData[i] = tensor.FloatData[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(float);
float[] rgData2 = new float[nLen];
rgData = new double[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData2, 0, rgRaw.Length);
Array.Copy(rgData2, rgData, nLen);
else if (tensor.DataType == (int)OnnxDefinitions.DataType.DOUBLE)
if (tensor.DoubleData.Count > 0)
rgData = new double[tensor.DoubleData.Count];
for (int i = 0; i < tensor.DoubleData.Count; i++)
    rgData[i] = (float)tensor.DoubleData[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(double);
double[] rgData2 = new double[nLen];
rgData = new double[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData, 0, rgRaw.Length);
else if (tensor.DataType == (int)OnnxDefinitions.DataType.INT32)
if (tensor.Int32Data.Count > 0)
rgData = new double[tensor.Int32Data.Count];
for (int i = 0; i < tensor.Int32Data.Count; i++)
    rgData[i] = (float)tensor.Int32Data[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(int);
int[] rgData2 = new int[nLen];
rgData = new double[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData2, 0, rgRaw.Length);
Array.Copy(rgData2, rgData, nLen);
else if (tensor.DataType == (int)OnnxDefinitions.DataType.INT64)
if (tensor.Int64Data.Count > 0)
rgData = new double[tensor.Int64Data.Count];
for (int i = 0; i < tensor.Int64Data.Count; i++)
    rgData[i] = (float)tensor.Int64Data[i];
byte[] rgRaw = tensor.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(long);
long[] rgData2 = new long[nLen];
rgData = new double[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData2, 0, rgRaw.Length);
Array.Copy(rgData2, rgData, nLen);
throw new Exception("Currently only the 'DataType.FLOAT' and 'DataType.DOUBLE' are supported for conversions to MyCaffe.");
private int getOutputs(string strLayerName, BlobCollection<T> col, string strWt, string strBias, out bool bBiasTerm, int nAxis = 0)
int? nWtOutputs = null;
int? nBiasOutputs = null;
if (blob.Name == strWt)
blob.Tag = strLayerName;
int nAxis1 = (nAxis >= 0) ? nAxis : nAxis + blob.shape().Count;
for (int i = nAxis1; i >= 0; i--)
if (blob.shape()[i] > 1)
nWtOutputs = blob.shape()[nAxis1];
else if (blob.Name == strBias && strBias != null)
blob.Tag = strLayerName;
nBiasOutputs = blob.shape()[0];
if (nWtOutputs.HasValue && bBiasTerm)
if (!nWtOutputs.HasValue && !nBiasOutputs.HasValue)
    throw new Exception("Could not find the blob '" + strWt + "' or the blob '" + strBias + "'!");
if (nWtOutputs.HasValue)
    return nWtOutputs.Value;
return nBiasOutputs.Value;
private string getOperator(OnnxDefinitions onnx, OnnxDefinitions.OPERATORS op)
string str = onnx.GetString(op);
str = op.ToString();
private string convertWs(string str)
foreach (char ch in str)
if (char.IsWhiteSpace(ch))
private void scale(Blob<T> blob)
if (!m_dfWtScaleMin.HasValue || double.IsNaN(m_dfWtScaleMin.Value) || !m_dfWtScaleMax.HasValue || double.IsNaN(m_dfWtScaleMax.Value))
Dictionary<string, ConstantParameter> rgConstants = new Dictionary<string, ConstantParameter>();
List<string> rgUsedConstants = new List<string>();
NodeProto lastNode = null;
for (int nNodeIdx = 0; nNodeIdx < rg.Count; nNodeIdx++)
NodeProto node = rg[nNodeIdx];
List<string> rgstrLearnableBlobs = new List<string>();
bool bSkipLayer = false;
string strNodeName = convertWs(node.Name);
if (string.IsNullOrEmpty(strNodeName))
    strNodeName = convertWs(node.Output[0]);
List<string> rgstrExcludedInputs = new List<string>();
if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Abs))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Acos))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Acosh))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Add))
layer.name = strNodeName;
foreach (string strInput in node.Input)
if (!isInputUsed(p.layer, strInput))
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ArgMin))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ArgMax))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Asin))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Asinh))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Atan))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Atanh))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.AveragePool))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.BatchNormalization))
layer.name = strNodeName;
for (int i = 1; i < node.Input.Count; i++)
string strInput = convertWs(node.Input[i]);
if (blob == null && rgConstants.ContainsKey(strInput))
blob = createBlobFromConstant(constParam, cuda, log, strInput);
rgUsedConstants.Add(strInput);
blob.Tag = strNodeName;
colLearnable.Add(colBlobs[2]);
colLearnable.Add(colBlobs[3]);
blobVarCor.Tag = strNodeName;
colLearnable.Add(blobVarCor);
colLearnable.Add(colBlobs[0]);
colLearnable.Add(colBlobs[1]);
for (int i = 1; i < node.Input.Count; i++)
string strInput = convertWs(node.Input[i]);
rgstrExcludedInputs.Add(strInput);
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Clip))
layer.name = strNodeName;
fillParameter(node.Attribute, layer.clip_param);
for (int i = 1; i < node.Input.Count; i++)
if (rgF.Length == 1)
rgUsedConstants.Add(node.Input[i]);
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Concat))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Constant))
string strOutput = convertWs(node.Output[0]);
if (!rgConstants.ContainsKey(strOutput))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ConstantOfShape))
string strOutput = convertWs(node.Output[0]);
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Conv))
int nGroupReductionFactor = 1;
int nFirstLearnableIdx = -1;
int nAddLearnableCount = 0;
for (int i = 1; i < node.Input.Count; i++)
string strInput = convertWs(node.Input[i]);
if (blob == null && rgConstants.ContainsKey(strInput))
blob = createBlobFromConstant(constParam, cuda, log, strInput);
rgUsedConstants.Add(strInput);
rgstrLearnableBlobs.Add(convertWs(node.Input[i]));
colLearnable.Add(blob);
nAddLearnableCount++;
nFirstLearnableIdx = colLearnable.Count - 1;
layer.name = strNodeName;
fillParameter(node.Attribute, layer.convolution_param, out nGroupReductionFactor);
string strWt = rgstrLearnableBlobs[0];
string strBias = (rgstrLearnableBlobs.Count > 1) ? rgstrLearnableBlobs[1] : null;
List<int> rgBiasShape = new List<int>() { colLearnable[colLearnable.Count - 1].num, 1, 1, 1 };
if (nAddLearnableCount == 1)
filler.Fill(blobBias);
blobBias.Tag = strNodeName;
colLearnable.Add(blobBias);
if (nFirstLearnableIdx >= 0 && nGroupReductionFactor > 1)
Blob<T> blob = colLearnable[nFirstLearnableIdx];
for (int c = 0; c < nGroupReductionFactor; c++)
colLearnable[nFirstLearnableIdx] = blobNew;
filler.Fill(blobBias);
blobBias.Tag = strNodeName;
colLearnable.Add(blobBias);
m_bEnableBackward = true;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Cos))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Cosh))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Div))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Dropout))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Elu))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Exp))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Flatten))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Gather))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Gemm))
int nAddLearnableCount = 0;
for (int i = 1; i < node.Input.Count; i++)
string strInput = convertWs(node.Input[i]);
rgstrLearnableBlobs.Add(strInput);
if (blob == null && rgConstants.ContainsKey(strInput))
blob = createBlobFromConstant(constParam, cuda, log, strInput);
rgUsedConstants.Add(strInput);
colLearnable.Add(blob);
nAddLearnableCount++;
layer.name = strNodeName;
string strWt = rgstrLearnableBlobs[0];
string strBias = (rgstrLearnableBlobs.Count > 1) ? rgstrLearnableBlobs[1] : null;
m_bEnableBackward = true;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.LRN))
layer.name = strNodeName;
fillParameter(node.Attribute, layer.lrn_param);
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Log))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.LeakyRelu))
layer.name = strNodeName;
fillParameter(node.Attribute, layer.relu_param, true);
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.MaxPool))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Min))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Max))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.MatMul))
int nAddLearnableCount = 0;
for (int i = 1; i < node.Input.Count; i++)
string strInput = convertWs(node.Input[i]);
rgstrLearnableBlobs.Add(strInput);
if (blob == null && rgConstants.ContainsKey(strInput))
blob = createBlobFromConstant(constParam, cuda, log, strInput);
rgUsedConstants.Add(strInput);
colLearnable.Add(blob);
nAddLearnableCount++;
layer.name = strNodeName;
string strWt = rgstrLearnableBlobs[0];
string strBias = (rgstrLearnableBlobs.Count > 1) ? rgstrLearnableBlobs[1] : null;
m_bEnableBackward = true;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Mul))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Neg))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.GlobalAveragePool))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.GlobalMaxPool))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Pow))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.PRelu))
for (int i = 1; i < node.Input.Count; i++)
string strInput = convertWs(node.Input[i]);
rgstrLearnableBlobs.Add(strInput);
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Relu))
layer.name = strNodeName;
fillParameter(node.Attribute, layer.relu_param, false);
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Reshape))
NodeProto nextNode = findNextNodeWithBtm(rg, nNodeIdx, 0);
string strLastOutput = null;
if (lastNode == null)
ValueInfoProto input = findInput(rgInputs, rg[nNodeIdx].Input[0]);
strLastOutput = input.Name;
strLastOutput = lastNode.Output[0];
if (nextNode != null &&
    (nextNode.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.MatMul) ||
     nextNode.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Gemm)))
nextNode.Input[0] = strLastOutput;
nextNode = findNextNodeWithBtm(rg, nNodeIdx, 1);
if (nextNode != null &&
    (nextNode.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.MatMul) ||
     nextNode.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Gemm)))
nextNode.Input[1] = strLastOutput;
layer.name = strNodeName;
if (!fillParameter(node.Attribute, layer.reshape_param, col, node.Input))
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Sin))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Sinh))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Sign))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Softmax))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Split))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Slice))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Sqrt))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Sub))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Tan))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Tanh))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Transpose))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Shape))
layer.name = strNodeName;
fillParameter(node.Attribute, layer.reshape_param, col, node.Input);
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Squeeze))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Unsqueeze))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ReduceMin))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ReduceMax))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ReduceSum))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ReduceMean))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.ReduceSumSquare))
layer.name = strNodeName;
else if (node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.NonMaxSuppression) ||
         node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.Cast) ||
         node.OpType == getOperator(onnx, OnnxDefinitions.OPERATORS.TopK))
throw new Exception("Currently the node OpType '" + node.OpType + "' is not supported!");
foreach (string strInput in node.Input)
string strInput1 = convertWs(strInput);
if (!rgstrLearnableBlobs.Contains(strInput1) && !rgstrExcludedInputs.Contains(strInput1) && !rgUsedConstants.Contains(strInput1))
    layer.bottom.Add(strInput1);
foreach (string strLearnable in rgstrLearnableBlobs)
    rgstrInputs.Remove(strLearnable);
foreach (string strOutput in node.Output)
    layer.top.Add(convertWs(strOutput));
for (int i = 1; i < node.Input.Count; i++)
if (!rgConstants.ContainsKey(node.Input[i]) && !rgUsedConstants.Contains(node.Input[i]) && !rgstrLearnableBlobs.Contains(node.Input[i]))
string strName = convertWs(node.Input[i]);
if (bIncludeConstants && isLayerUsed(rg, strName, strName))
layerConst.name = strName;
layerConst.top.Add(layerConst.name);
m_strReport += "Adding constant layer '" + layerConst.ToString() + "'" + Environment.NewLine;
p.layer.Add(layerConst);
if (!bSkipLayer && !ignoreLayer(layer))
if (lastLayer != null && lastLayerAdded != null && lastLayer.name != lastLayerAdded.name)
    layer.bottom = new List<string>(lastLayerAdded.top);
m_strReport += "Adding layer '" + layer.ToString() + "'" + Environment.NewLine;
lastLayerAdded = layer;
foreach (string strUsedConstant in rgUsedConstants)
    removeLayer(p.layer, strUsedConstant);
return colLearnable;
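// addLayers (above) walks the ONNX nodes in order, maps each supported OpType onto the matching
// MyCaffe LayerParameter, separates weight/bias inputs (initializer tensors or Constant nodes) into
// 'colLearnable', wires the remaining inputs/outputs as bottoms/tops, and logs every decision in
// m_strReport; unsupported OpTypes raise an exception.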
private ValueInfoProto findInput(RepeatedField<ValueInfoProto> rg, string strName)
foreach (ValueInfoProto v in rg)
if (v.Name == strName)
private List<int> getShape(BlobShape shape)
List<int> rgShape = new List<int>();
bool bNonOneFound = false;
for (int i = shape.dim.Count - 1; i >= 0; i--)
if (bNonOneFound || shape.dim[i] > 1)
rgShape.Insert(0, shape.dim[i]);
if (shape.dim[i] > 1)
    bNonOneFound = true;
private NodeProto findNextNodeWithBtm(RepeatedField<NodeProto> rg, int nStartIdx, int nIdx)
string strOutput = rg[nStartIdx].Output[0];
for (int i = nStartIdx + 1; i < rg.Count; i++)
foreach (string strInput in rg[i].Input)
if (rg[i].Input.Count > nIdx && strOutput == rg[i].Input[nIdx])
private void removeLayer(List<LayerParameter> rgLayers, string strName)
List<LayerParameter> rg = rgLayers.Where(p => p.name == strName).ToList();
rgLayers.Remove(rg[0]);
blob.Name = strInput;
else if (constParam.values_f.Count == 1)
BlobProto proto = loadBinaryData(constParam);
if (rgData.Length > 0)
foreach (string strIgnore in m_rgstrIgnoreLayerNames)
if (layer.name.ToUpper().Contains(strIgnore.ToUpper()))
private bool isInputUsed(List<LayerParameter> rgLayers, string strBtm)
if (layer.top.Contains(strBtm))
private bool isLayerUsed(RepeatedField<NodeProto> rgNodes, string strName, string strTop)
foreach (NodeProto node in rgNodes)
if (node.Name != strName)
foreach (string strBtm in node.Input)
if (strBtm == strTop)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "epsilon")
else if (attrib.Name == "momentum")
private void fillParameter(RepeatedField<AttributeProto> rg, ClipParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "min")
else if (attrib.Name == "max")
private void fillParameter(RepeatedField<AttributeProto> rg, ConcatParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "axis")
    p.axis = (int)attrib.I;
private string cleanFileName(string str)
foreach (char ch in str)
if (ch == '\\' || ch == '/' || ch == ':')
private void saveBinaryData(ConstantParameter p, string strName, float[] rgData)
string strFile = m_strOriginalPath + "\\" + cleanFileName(strName) + ".bin";
if (File.Exists(strFile))
    File.Delete(strFile);
using (FileStream fs = new FileStream(strFile, FileMode.CreateNew, FileAccess.Write))
using (BinaryWriter bw = new BinaryWriter(fs))
proto.data = new List<float>(rgData);
private void saveBinaryData(ConstantParameter p, string strName, RepeatedField<float> rgData)
string strFile = m_strOriginalPath + "\\" + strName + ".bin";
if (File.Exists(strFile))
    File.Delete(strFile);
using (FileStream fs = new FileStream(strFile, FileMode.CreateNew, FileAccess.Write))
using (BinaryWriter bw = new BinaryWriter(fs))
proto.data = rgData.ToList();
using (FileStream fs = new FileStream(p.binary_data_file, FileMode.Open, FileAccess.Read))
using (BinaryReader br = new BinaryReader(fs))
foreach (int nDim in input.shape())
if (rgData.Length <= 32)
    p.values_f = new List<float>(rgData);
saveBinaryData(p, input.Name, rgData);
private void fillParameter(RepeatedField<AttributeProto> rg, string strName, ConstantParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "value_float")
else if (attrib.Name == "value_floats")
foreach (float f in attrib.Floats)
if (attrib.Name == "value_int")
else if (attrib.Name == "value_ints")
foreach (int i in attrib.Ints)
else if (attrib.Name == "value")
if (attrib.T.Dims.Count > 0)
foreach (long nDim in attrib.T.Dims)
if (attrib.T.DataType == (int)OnnxDefinitions.DataType.FLOAT)
if (attrib.T.FloatData.Count <= 32)
foreach (float f in attrib.T.FloatData)
saveBinaryData(p, strName, attrib.T.FloatData);
else if (attrib.T.DataType == (int)OnnxDefinitions.DataType.INT64)
if (attrib.T.Int64Data.Count > 0 && attrib.T.Int64Data.Count <= 32)
foreach (float f in attrib.T.FloatData)
byte[] rgRaw = attrib.T.RawData.ToByteArray();
int nLen = rgRaw.Length / sizeof(long);
long[] rgData1 = new long[nLen];
Buffer.BlockCopy(rgRaw, 0, rgData1, 0, rgRaw.Length);
foreach (long lVal in rgData1)
throw new Exception("The datatype '" + ((OnnxDefinitions.DataType)attrib.T.DataType).ToString() + "' is not yet supported.");
throw new Exception("The attribute name '" + attrib.Name + "' is not yet supported!");
private void fillParameter(RepeatedField<AttributeProto> rg, ConvolutionParameter p, out int nGroupReductionFactor)
nGroupReductionFactor = 1;
foreach (AttributeProto attrib in rg)
if (attrib.Name == "kernel_shape")
long h = attrib.Ints[0];
throw new Exception("Kernel height shape must be > 0!");
long w = attrib.Ints[1];
throw new Exception("Kernel width shape must be > 0!");
else if (attrib.Name == "strides")
long h = attrib.Ints[0];
throw new Exception("stride height shape must be > 0!");
long w = attrib.Ints[1];
throw new Exception("stride width shape must be > 0!");
p.stride = new List<uint>() { (uint)w };
p.stride = new List<uint>();
else if (attrib.Name == "pads")
long h = attrib.Ints[0];
throw new Exception("pad height shape must be >= 0!");
long w = attrib.Ints[1];
throw new Exception("pad width shape must be >= 0!");
p.pad = new List<uint>() { (uint)w };
p.pad = new List<uint>();
else if (attrib.Name == "dilations")
long d = attrib.Ints[0];
throw new Exception("dilation shape must be >= 0!");
p.dilation = new List<uint>() { (uint)d };
else if (attrib.Name == "group")
p.group = (uint)attrib.I;
nGroupReductionFactor = 3;
p.group /= (uint)nGroupReductionFactor;
if (p.pad.Count == 0 && !p.pad_h.HasValue && !p.pad_w.HasValue)
private void fillParameter(RepeatedField<AttributeProto> rg, DropoutParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "ratio")
else if (attrib.Name == "training_mode")
p.active = (attrib.I == 0) ? false : true;
else if (attrib.Name == "seed")
private void fillParameter(RepeatedField<AttributeProto> rg, FlattenParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "axis")
    p.axis = (int)attrib.I;
private void fillParameter(RepeatedField<AttributeProto> rg, GatherParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "axis")
    p.axis = (int)attrib.I;
foreach (AttributeProto attrib in rg)
if (attrib.Name == "kernel_shape")
long h = attrib.Ints[0];
throw new Exception("Kernel height shape must be > 0!");
long w = attrib.Ints[1];
throw new Exception("Kernel width shape must be > 0!");
else if (attrib.Name == "strides")
long h = attrib.Ints[0];
throw new Exception("stride height shape must be > 0!");
long w = attrib.Ints[1];
throw new Exception("stride width shape must be > 0!");
p.stride = new List<uint>() { (uint)w };
p.stride = new List<uint>();
else if (attrib.Name == "pads")
long h = attrib.Ints[0];
throw new Exception("pad height shape must be >= 0!");
long w = attrib.Ints[1];
throw new Exception("pad width shape must be >= 0!");
p.pad = new List<uint>() { (uint)w };
p.pad = new List<uint>();
if (p.pad.Count == 0 && !p.pad_h.HasValue && !p.pad_w.HasValue)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "transB")
private void fillParameter(RepeatedField<AttributeProto> rg, LRNParameter p)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "alpha")
else if (attrib.Name == "beta")
else if (attrib.Name == "bias")
else if (attrib.Name == "size")
List<float> rgShape = new List<float>();
if (rgInputs.Count > 1)
string strInput1 = convertWs(rgInputs[1]);
rgShape = new List<float>(rgData);
foreach (float fDim in rgData)
foreach (AttributeProto attrib in rg)
if (attrib.Name == "shape")
foreach (long lDim in attrib.Ints)
private void fillParameter(RepeatedField<AttributeProto> rg, PReLUParameter p)
{
    // ... (body elided in this listing)
}

// (the signature of the following method is elided in this listing;
//  it reads reduction-style attributes)
foreach (AttributeProto attrib in rg)
{
    if (attrib.Name == "keepdims")
    {
        // ... (elided)
    }
    else if (attrib.Name == "axes")
    {
        // ... (elided)
    }
}

private void fillParameter(RepeatedField<AttributeProto> rg, ReLUParameter p, bool bLeaky)
{
    foreach (AttributeProto attrib in rg)
    {
        if (attrib.Name == "alpha")
        {
            // ... (negative_slope handling elided in this listing)
        }
    }
}
private void fillParameter(RepeatedField<AttributeProto> rg, SliceParameter p)
{
    foreach (AttributeProto attrib in rg)
    {
        if (attrib.Name == "axis")
            p.axis = (int)attrib.I;
    }
}

private void fillParameter(RepeatedField<AttributeProto> rg, SoftmaxParameter p)
{
    foreach (AttributeProto attrib in rg)
    {
        if (attrib.Name == "axis")
            p.axis = (int)attrib.I;
    }
}
// (method signature elided in this listing; it reads transpose dimensions)
List<int> rgDim = new List<int>();

foreach (AttributeProto attrib in rg)
{
    if (attrib.Name == "dim")
    {
        rgDim.Add((int)attrib.I);
    }
    else if (attrib.Name == "perm")
    {
        foreach (long val in attrib.Ints)
            rgDim.Add((int)val);
    }
}

if (rgDim.Count > 0)
{
    // ... (elided in this listing)
}
#pragma warning disable 1591

class LayerDataCollection : IEnumerable<LayerData>
{
    LayerData.TYPE m_type;
    List<LayerData> m_rgItems = new List<LayerData>();

    public LayerDataCollection(LayerData.TYPE type)
    {
        m_type = type;
    }

    public LayerData.TYPE Type
    {
        get { return m_type; }
    }

    public bool Contains(string strName)
    {
        foreach (LayerData item in m_rgItems)
        {
            if (item.Name == strName)
                return true;
        }

        return false;
    }

    // (method signature elided in this listing; the body removes the matching
    //  tops from the layer parameter 'p' and builds a report string)
    List<LayerData> rgItems = FindAll(nLayerIdx);
    string strReport = "";

    foreach (LayerData item in rgItems)
    {
        p.top.Remove(item.Name);
        strReport += "Removed top '" + item.Name + "' from layer '" + item.Layer.name + "(" + item.Layer.type.ToString() + ") at layer index = " + item.LayerIndex.ToString() + Environment.NewLine;
    }

    public List<LayerData> FindAll(int nLayerIdx)
    {
        return m_rgItems.Where(p => p.LayerIndex == nLayerIdx).ToList();
    }

    public int Count
    {
        get { return m_rgItems.Count; }
    }

    public void Add(LayerData item)
    {
        m_rgItems.Add(item);
    }

    // (method signature elided in this listing; it adds one LayerData per name)
    foreach (string str in rg)
    {
        m_rgItems.Add(new LayerData(str, nIdx, layer, m_type));
    }

    public void Remove(List<string> rgstr)
    {
        List<int> rgDelete = new List<int>();

        for (int i = 0; i < m_rgItems.Count; i++)
        {
            for (int j = 0; j < rgstr.Count; j++)
            {
                if (m_rgItems[i].Name == rgstr[j])
                    rgDelete.Add(i);
            }
        }

        for (int i = rgDelete.Count - 1; i >= 0; i--)
        {
            m_rgItems.RemoveAt(rgDelete[i]);
        }
    }

    public IEnumerator<LayerData> GetEnumerator()
    {
        return m_rgItems.GetEnumerator();
    }

    IEnumerator IEnumerable.GetEnumerator()
    {
        return m_rgItems.GetEnumerator();
    }

    public LayerData this[int nIdx]
    {
        get { return m_rgItems[nIdx]; }
    }
}
// (the LayerData class declaration and its member fields are elided in this listing)

    public LayerData(string strName, int nLayerIdx, LayerParameter layer, TYPE type)
    {
        m_strName = strName;
        m_nLayerIdx = nLayerIdx;
        m_layer = layer;
        m_type = type;
    }

    public string Name
    {
        get { return m_strName; }
    }

    public int LayerIndex
    {
        get { return m_nLayerIdx; }
    }

    public LayerParameter Layer
    {
        get { return m_layer; }
    }

    public TYPE Type
    {
        get { return m_type; }
    }

    public override string ToString()
    {
        return m_strName + "(" + m_type.ToString() + ") at layer '" + m_layer.ToString() + "'(idx = " + m_nLayerIdx.ToString() + ")";
    }
}

#pragma warning restore 1591
The MyCaffeControl is the main object used to manage all training, testing and running of the MyCaffe...
Net< T > GetInternalNet(Phase phase=Phase.RUN)
Returns the internal net based on the Phase specified: TRAIN, TEST or RUN.
Log Log
Returns the Log (for output) used.
The CancelEvent provides an extension to the manual cancel event that allows for overriding the manua...
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
bool EnableTrace
Enables/disables the Trace. When enabled, the .Net Trace.WriteLine is called in addition to the norma...
The RawProto class is used to parse and output Google prototxt file data.
override string ToString()
Returns the RawProto as its full prototxt string.
static RawProto Parse(string str)
Parses a prototxt and places it in a new RawProto.
The Utility class provides general utility functions.
static double[] ConvertVec(float[] rgf)
Convert an array of float to an array of double.
string Name
Get/set the name of the item.
The DatasetDescriptor class describes a dataset which contains both a training data source and testin...
SourceDescriptor TrainingSource
Get/set the training data source.
SourceDescriptor TestingSource
Get/set the testing data source.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
bool Remove(Blob< T > b)
If it exists, remove a Blob from the collection.
Blob< T > FindBlob(string strName)
Finds a given blob in the collection based on its name.
The Blob is the main holder of data that moves through the Layers of the Net.
int channels
DEPRECATED; legacy shape accessor channels: use shape(1) instead.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
int height
DEPRECATED; legacy shape accessor height: use shape(2) instead.
object Tag
Returns a user defined object associated with the Blob.
string shape_string
Returns a string describing the Blob's shape.
T[] mutable_cpu_data
Get data from the GPU and bring it over to the host, or Set data from the Host and send it over to th...
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use
void CopyFrom(Blob< T > src, int nSrcOffset, int nDstOffset, int nCount, bool bCopyData, bool bCopyDiff)
Copy from a source Blob.
void scale_to_range(double dfMin, double dfMax)
Scale the data in the blob to the range [dfMin,dfMax].
int width
DEPRECATED; legacy shape accessor width: use shape(3) instead.
List< int > shape()
Returns an array where each element contains the shape of an axis of the Blob.
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
int num
DEPRECATED; legacy shape accessor num: use shape(0) instead.
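A minimal usage sketch (not part of the listing) tying the Blob members above together; the Blob constructor arguments and the 'cuda'/'log' objects are assumptions created elsewhere.
    // Assumed setup: 'cuda' (CudaDnn<float>) and 'log' (Log) already exist.
    Blob<float> blob = new Blob<float>(cuda, log);
    blob.Name = "data";
    blob.Reshape(1, 3, 28, 28);                    // num, channels, height, width
    log.WriteLine("blob shape = " + blob.shape_string);

    float[] rgData = new float[blob.num * blob.channels * blob.height * blob.width];
    blob.SetData(rgData);                          // push host data to the GPU copy
    blob.scale_to_range(0, 1);                     // scale the data into [0,1]
    float[] rgOut = blob.update_cpu_data();        // pull the GPU data back to the host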
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Connects Layers together into a directed acyclic graph (DAG) specified by a NetParameter
List< Layer< T > > layers
Returns the layers.
BlobCollection< T > input_blobs
Returns the collection of input Blobs.
virtual void Dispose(bool bDisposing)
Releases all resources (GPU and Host) used by the Net.
void LoadWeights(byte[] rgWeights, IXPersist< T > persist, List< string > inputWtInfo=null, List< string > targetWtInfo=null, string strSkipBlobType=null)
Loads new weights into the Net.
BlobCollection< T > output_blobs
Returns the collection of output Blobs.
BlobCollection< T > learnable_parameters
Returns the learnable parameters.
NetParameter net_param
Returns the net parameter.
The PersistCaffe class is used to load and save weight files in the .caffemodel format.
byte[] SaveWeights(BlobCollection< T > colBlobs, bool bSaveDiffs=false)
Save the weights to a byte array.
The MyCaffeConversionControl handles converting between MyCaffe and ONNX formats. The OnnxControl is ...
static float[] getDataAsFloat(TensorProto tensor)
Converts the tensor data into an array of float.
MyCaffeConversionControl()
The constructor.
static double[] getDataAsDouble(TensorProto tensor)
Converts the tensor data into an array of double.
List< string > IgnoreLayerNames
Get/set the list of layer names to ignore (layers are ignored when they contain the text from one of ...
string ReportString
Returns the report from the conversion.
ModelProto ConvertMyCaffeToOnnx(MyCaffeControl< T > ctrl, int nOpSetVersion=9, bool bUseRawData=true, OnnxDefinitions.DataType dstDataType=OnnxDefinitions.DataType.FLOAT, Phase phase=Phase.RUN)
Convert a model currently loaded into the MyCaffeControl to an ONNX ModelProto.
void ConvertMyCaffeToOnnxFile(MyCaffeControl< T > ctrl, string strOnnxFile, int nOpSetVersion=9, bool bUseRawData=true, OnnxDefinitions.DataType dstDataType=OnnxDefinitions.DataType.FLOAT, Phase phase=Phase.RUN)
Convert a model currently loaded into the MyCaffeControl to an ONNX .onnx model file.
MyCaffeConversionControl(IContainer container)
The constructor.
void SetWeightScaling(double dfMin, double dfMax)
Set the scaling factors applied to the weights.
ModelProto ConvertMyCaffeToOnnx(CudaDnn< T > cuda, Log log, MyCaffeModelData data, int nOpSetVersion=9, bool bUseRawData=true, OnnxDefinitions.DataType dstDataType=OnnxDefinitions.DataType.FLOAT)
Convert a MyCaffe model description, weights and optionally mean image from the MyCaffe model format ...
void ConvertMyCaffeToOnnxFile(CudaDnn< T > cuda, Log log, MyCaffeModelData data, string strOutputFile, int nOpSetVersion=9, bool bUseRawData=true, OnnxDefinitions.DataType dstDataType=OnnxDefinitions.DataType.FLOAT)
Convert a MyCaffe model description, weights and optionally mean image from the MyCaffe model format ...
MyCaffeModelData ConvertOnnxToMyCaffe(CudaDnn< T > cuda, Log log, ModelProto onnxModel, bool bFixupNeuronNodes=true, bool bIncludeLastLayerWeights=false, DatasetDescriptor dsTraining=null)
Convert an ONNX ModelProto to the MyCaffe model description, weights and optionally mean image.
MyCaffeModelData ConvertOnnxToMyCaffeFromFile(CudaDnn< T > cuda, Log log, string strOnnxFile, bool bFixlupNeuronNodes=true, bool bIncludeLastLayerWeights=false, DatasetDescriptor dsTraining=null)
Convert an ONNX .onnx model file to the MyCaffe model description, weights and optionally mean image.
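A minimal usage sketch (not part of the listing) showing how the conversion methods above fit together; the CudaDnn and Log construction details and the file names are assumptions.
    MyCaffeConversionControl<float> convert = new MyCaffeConversionControl<float>();
    CudaDnn<float> cuda = new CudaDnn<float>(0);     // assumed: GPU device 0
    Log log = new Log("onnx-conversion");            // assumed: log source name

    // ONNX -> MyCaffe: returns the model description, weights (and optionally the mean image).
    MyCaffeModelData data = convert.ConvertOnnxToMyCaffeFromFile(cuda, log, "model.onnx");
    log.WriteLine(data.ModelDescription);

    // MyCaffe -> ONNX: writes the converted model back out as an .onnx file.
    convert.ConvertMyCaffeToOnnxFile(cuda, log, data, "model.converted.onnx");
    log.WriteLine(convert.ReportString);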
The MyCaffeModelData object contains the model descriptor, model weights and optionally the image mea...
string ModelDescription
Get/set the model descriptor.
byte[] Weights
Returns the model weights.
string OriginalDownloadFile
Specifies the original download file, if any.
Abstract Filler class used to fill blobs with values.
void Fill(Blob< T > b)
Fill the blob with values based on the actual filler used.
static Filler< T > Create(CudaDnn< T > cuda, Log log, FillerParameter p)
Create a new Filler instance.
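A short sketch (not part of the listing) of the Filler pattern documented above; the FillerParameter type string and the 'cuda', 'log' and 'blob' objects are assumptions.
    FillerParameter fp = new FillerParameter("xavier");          // assumed filler type
    Filler<float> filler = Filler<float>.Create(cuda, log, fp);  // factory creates the concrete filler
    filler.Fill(blob);                                           // fills the blob's data in-place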
An interface for the units of computation which can be composed into a Net.
LayerParameter.LayerType type
Returns the LayerType of this Layer.
LayerParameter layer_param
Returns the LayerParameter for this Layer.
BlobCollection< T > blobs
Returns the collection of learnable parameter Blobs for the Layer.
int axis
The 'label' axis of the prediction blob, whose argmax corresponds to the predicted label – may be nega...
Specifies the parameters for the ArgMaxLayer
COMPARE_OPERATOR operation
Specifies the operation to use (default = MAX).
COMPARE_OPERATOR
Defines the compare operator to use (max or min, default = max).
Specifies the parameters for the BatchNormLayer.
bool scale_bias
Specifies to use the scale and bias terms, otherwise the scale = 1 and bias = 0 are used to form an i...
double eps
Specifies a small value to add to the variance estimate so that we don't divide by zero.
double moving_average_fraction
Specifies how much the moving average decays each iteration. Smaller values make the moving average d...
The BlobProto contains the description of a blob.
List< float > data
Get/set the data as a List of float.
object Load(BinaryReader br, bool bNewInstance)
Loads a BlobProto from a binary reader.
BlobShape shape
Specifies the shape of the Blob.
void Save(BinaryWriter bw)
Saves the BlobProto to a binary writer.
Specifies the shape of a Blob.
List< int > dim
The blob shape dimensions.
Stores the parameters used by the ClipLayer
double min
Specifies the min value for the Clip activation function.
double max
Specifies the max value for the Clip activation function.
Specifies the parameters for the ConcatLayer
int axis
The axis along which to concatenate – may be negative to index from the end (e.g.,...
Specifies the parameters for the ConstantLayer.
BlobShape output_shape
Specifies the output shape.
string binary_data_file
Specifies a binary data file containing the values to load.
List< float > values_f
Specifies a set of float values used to fill the output. When only one item is specified,...
Specifies the parameters for the ConvolutionLayer. The default weight filler is set to the XavierFill...
uint group
The group size for group convolution.
FillerParameter bias_filler
The filler for the bias. The default is set to use the 'constant = 0.1' filler.
bool bias_term
Whether to have bias terms or not.
uint num_output
The number of outputs for the layer.
virtual uint batch_size
Specifies the batch size.
string source
When used with the DATA parameter, specifies the data 'source' within the database....
Specifies the parameters of the DropoutLayer.
double dropout_ratio
Specifies the dropout ratio. (e.g. the probability that values will be dropped out and set to zero....
long seed
Specifies the seed used by cuDnn for random number generation.
bool active
Specifies whether or not the dropout is active. When inactive and training,...
Specifies the parameters for the EltwiseLayer.
EltwiseOp
Defines the operation to perform.
EltwiseOp operation
Specifies the element-wise operation.
Specifies the parameters for the EluLayer.
Specifies the parameters for the ExpLayer.
Specifies the parameters for the FlattenLayer.
int axis
Specifies the first axis to flatten: all preceding axes are retained in the output....
Specifies the parameters for the InnerProductLayer.
bool transpose
Specifies whether to transpose the weight matrix or not. If transpose == true, any operations will be...
uint num_output
The number of outputs for the layer.
bool bias_term
Whether to have bias terms or not.
uint? stride_h
The stride height (2D only)
List< uint > kernel_size
Kernel size is given as a single value for equal dimensions in all spatial dimensions,...
List< uint > dilation
Factor used to dilate the kernel, (implicitly) zero-filling the resulting holes. (Kernel dilation is ...
uint? stride_w
The stride width (2D only)
uint? pad_h
The padding height (2D only)
uint? kernel_h
The kernel height (2D only)
List< uint > stride
Stride is given as a single value for equal dimensions in all spatial dimensions, or once per spatial...
uint? kernel_w
The kernel width (2D only)
uint? pad_w
The padding width (2D only)
List< uint > pad
Pad is given as a single value for equal dimensions in all spatial dimensions, or once per spatial di...
Specifies the parameter for the LRNLayer.
double beta
Specifies the beta value used as the power parameter in the normalization formula....
uint local_size
Specifies the local size of the normalization window width.
double alpha
Specifies the alpha value used for variance scaling in the normalization formula. NOTE: cuDNN uses a ...
double k
Specifies the k value used by the normalization parameter. NOTE: cuDNN uses a default of k = 2....
Specifies the base parameter for all layers.
ConvolutionParameter convolution_param
Returns the parameter set when initialized with LayerType.CONVOLUTION
SliceParameter slice_param
Returns the parameter set when initialized with LayerType.SLICE
LogParameter log_param
Returns the parameter set when initialized with LayerType.LOG
string name
Specifies the name of this LayerParameter.
ClipParameter clip_param
Returns the parameter set when initialized with LayerType.CLIP
LayerType type
Specifies the type of this LayerParameter.
ConstantParameter constant_param
Returns the parameter set when initialized with LayerType.CONSTANT
SoftmaxParameter softmax_param
Returns the parameter set when initialized with LayerType.SOFTMAX
LRNParameter lrn_param
Returns the parameter set when initialized with LayerType.LRN
List< NetStateRule > include
Specifies the NetStateRule's for which this LayerParameter should be included.
MathParameter math_param
Returns the parameter set when initialized with LayerType.MATH
ReLUParameter relu_param
Returns the parameter set when initialized with LayerType.RELU
bool freeze_learning
Get/set whether or not to freeze the learning for this layer globally.
ArgMaxParameter argmax_param
Returns the parameter set when initialized with LayerType.ARGMAX
ReductionParameter reduction_param
Returns the parameter set when initialized with LayerType.REDUCTION
PoolingParameter pooling_param
Returns the parameter set when initialized with LayerType.POOLING
ExpParameter exp_param
Returns the parameter set when initialized with LayerType.EXP
EltwiseParameter eltwise_param
Returns the parameter set when initialized with LayerType.ELTWISE
List< string > top
Specifies the active top connections (in the bottom, out the top)
ReshapeParameter reshape_param
Returns the parameter set when initialized with LayerType.RESHAPE
EluParameter elu_param
Returns the parameter set when initialized with LayerType.ELU
InnerProductParameter inner_product_param
Returns the parameter set when initialized with LayerType.INNERPRODUCT
AccuracyParameter accuracy_param
Returns the parameter set when initialized with LayerType.ACCURACY
ConcatParameter concat_param
Returns the parameter set when initialized with LayerType.CONCAT
GatherParameter gather_param
Returns the parameter set when initialized with LayerType.GATHER
TransformationParameter transform_param
Returns the parameter set when initialized with LayerType.TRANSFORM
TransposeParameter transpose_param
Returns the parameter set when initialized with LayerType.TRANSPOSE
DataParameter data_param
Returns the parameter set when initialized with LayerType.DATA
FlattenParameter flatten_param
Returns the parameter set when initialized with LayerType.FLATTEN
BatchNormParameter batch_norm_param
Returns the parameter set when initialized with LayerType.BATCHNORM
List< string > bottom
Specifies the active bottom connections (in the bottom, out the top).
LayerType
Specifies the layer type.
override string ToString()
Returns a string representation of the LayerParameter.
DropoutParameter dropout_param
Returns the parameter set when initialized with LayerType.DROPOUT
PReLUParameter prelu_param
Returns the parameter set when initialized with LayerType.PRELU
Specifies the parameters for the LogLayer.
Specifies the parameters for the MathLayer.
MyCaffe.common.MATH_FUNCTION function
Get/set the function to run.
Specifies the parameters use to create a Net
static NetParameter FromProto(RawProto rp)
Parse a RawProto into a new instance of the parameter.
List< string > input
The input blobs to the network.
string name
The name of the network.
override RawProto ToProto(string strName)
Converts this parameter into a RawProto.
List< LayerParameter > layer
The layers that make up the net. Each of their configurations, including connectivity and behavior,...
void SetPoolingReshapeAlgorithm(PoolingParameter.PoolingReshapeAlgorithm alg)
Sets all pooling layers to use the specified reshape algorithm.
NetParameter Clone(bool bCloneLayers=true, int? nSolverCount=null, int? nSolverRank=null)
Creates a new copy of this instance of the parameter.
List< BlobShape > input_shape
The shape of the input blobs.
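A short sketch (not part of the listing) of the RawProto/NetParameter round trip implied by the members above; 'strModelDesc' holding prototxt text and the "root" name passed to ToProto are assumptions.
    RawProto proto = RawProto.Parse(strModelDesc);              // parse prototxt text
    NetParameter netParam = NetParameter.FromProto(proto);      // build the typed parameter
    log.WriteLine(netParam.name + " has " + netParam.layer.Count.ToString() + " layers");
    string strPrototxt = netParam.ToProto("root").ToString();   // emit prototxt text again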
Specifies a NetStateRule used to determine whether a Net falls within a given include or exclude patt...
Specifies the parameters for the PReLULayer.
Specifies the parameters for the PoolingLayer.
PoolingReshapeAlgorithm
Defines the pooling reshape algorithm to use.
PoolingMethod
Defines the pooling method.
PoolingMethod pool
Specifies the pooling method.
bool global_pooling
Specifies whether or not to enable global pooling.
PoolingReshapeAlgorithm reshape_algorithm
Specifies the reshape algorithm to use, either the original Caffe reshape (default = false) or the ne...
Specifies the parameters for the ReLULayer
double negative_slope
Specifies the negative slope. Allow non-zero slope for negative inputs to speed up optimization.
Specifies the parameters used by ReductionLayer.
int axis
The first axis to reduce to a scalar – may be negative to index from the end (e.g., -1 for the last axis)....
ReductionOp
Defines the reduction operation.
ReductionOp operation
Specifies the reduction operation.
Specifies the parameters for the ReshapeLayer.
int axis
Specifies the axis portion of the bottom blob's shape that is replaced by (included in) the reshape....
BlobShape shape
Specifies the output dimensions.
Specifies the parameters for the SliceLayer.
int axis
Specifies the axis along which to slice – may be negative to index from the end (e....
Specifies the parameters for the SoftmaxLayer
int axis
The axis along which to perform the softmax – may be negative to index from the end (e....
Specifies the parameters for the GatherLayer.
int axis
Specifies the first axis to gather: all preceding axes are retained in the output....
Specifies the parameters for the TransposeLayer.
List< int > dim
Specifies the dimensions to transpose.
The Component class is a standard Microsoft.NET class that implements the IComponent interface and is...
The descriptors namespace contains all descriptor used to describe various items stored within the da...
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
Phase
Defines the Phase under which to run a Net.
The MyCaffe.common namespace contains common MyCaffe classes.
MATH_FUNCTION
Defines the mathematical function to run.
The MyCaffe.converter.onnx namespace contains the objects used to convert to/from the MyCaffe and CAF...
The MyCaffe.fillers namespace contains all fillers including the Filler class.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param.beta parameters are used by the MyCaffe.layer.beta layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...