#include <algorithm>
#include <fstream>
#include <memory>
+ #include <string>
#include <vector>

#include <core/logging/assertions.hpp>
@@ -20,19 +21,31 @@ namespace neural_net {
namespace {

+ using CoreML::Specification::AddBroadcastableLayerParams;
using CoreML::Specification::BatchnormLayerParams;
using CoreML::Specification::BorderAmounts_EdgeSizes;
+ using CoreML::Specification::ConcatNDLayerParams;
using CoreML::Specification::ConvolutionLayerParams;
+ using CoreML::Specification::ExpandDimsLayerParams;
+ using CoreML::Specification::GatherLayerParams;
+ using CoreML::Specification::GetShapeLayerParams;
using CoreML::Specification::InnerProductLayerParams;
+ using CoreML::Specification::LoadConstantNDLayerParams;
using CoreML::Specification::Model;
using CoreML::Specification::NeuralNetwork;
using CoreML::Specification::NeuralNetworkImageScaler;
using CoreML::Specification::NeuralNetworkLayer;
using CoreML::Specification::NeuralNetworkPreprocessing;
using CoreML::Specification::PaddingLayerParams;
+ using CoreML::Specification::PaddingLayerParams_PaddingConstant;
using CoreML::Specification::Pipeline;
using CoreML::Specification::PoolingLayerParams;
+ using CoreML::Specification::ReshapeDynamicLayerParams;
+ using CoreML::Specification::ReshapeStaticLayerParams;
using CoreML::Specification::SamePadding;
+ using CoreML::Specification::SplitNDLayerParams;
+ using CoreML::Specification::SqueezeLayerParams;
+ using CoreML::Specification::TransposeLayerParams;
using CoreML::Specification::UniDirectionalLSTMLayerParams;
using CoreML::Specification::UpsampleLayerParams;
using CoreML::Specification::WeightParams;
@@ -642,10 +655,10 @@ void model_spec::add_convolution(
  }
}

- void model_spec::add_padding(
-     const std::string& name, const std::string& input,
-     size_t padding_top, size_t padding_bottom, size_t padding_left,
-     size_t padding_right) {
+ void model_spec::add_padding(const std::string& name, const std::string& input,
+                              size_t padding_top, size_t padding_bottom,
+                              size_t padding_left, size_t padding_right,
+                              padding_policy policy) {
  NeuralNetworkLayer* layer = impl_->add_layers();
  layer->set_name(name);
  layer->add_input(input);
@@ -662,12 +675,19 @@ void model_spec::add_padding(
  left_right->set_startedgesize(padding_left);
  left_right->set_endedgesize(padding_right);

-   /**
-    * TODO: Currently we only handle reflective padding in our CoreML models.
-    * If you need to support more types of padding in this particular layer,
-    * please modify the code below to extend its functionality.
-    */
-   params->mutable_reflection();
+   switch (policy) {
+   case padding_policy::REFLECTIVE:
+     params->mutable_reflection();
+     break;
+   case padding_policy::REPLICATION:
+     params->mutable_replication();
+     break;
+   case padding_policy::ZERO:
+     PaddingLayerParams_PaddingConstant* constant_padding =
+         params->mutable_constant();
+     constant_padding->set_value(0);
+     break;
+   }
}

void model_spec::add_upsampling(
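Note: a minimal usage sketch of the add_padding() change above (hypothetical caller code, not part of this diff; `spec` stands for a model_spec instance, and the exact qualification of padding_policy depends on where the enum is declared in model_spec.hpp):

  // Zero-pad "conv1_out" by one pixel on each side.
  spec.add_padding("pad1", "conv1_out",
                   /* padding_top */ 1, /* padding_bottom */ 1,
                   /* padding_left */ 1, /* padding_right */ 1,
                   padding_policy::ZERO);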
@@ -977,6 +997,173 @@ void model_spec::add_preprocessing(const std::string& feature_name,
  image_scaler->set_channelscale(image_scale);
}

+ void model_spec::add_transpose(const std::string& name,
+                                const std::string& input,
+                                std::vector<size_t> axes) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_input(input);
+   layer->add_output(name);
+   TransposeLayerParams* params = layer->mutable_transpose();
+   for (size_t a : axes) {
+     params->add_axes(a);
+   }
+ }
+
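+ // Splits `input` along `axis` into `num_splits` outputs named "<name>_<i>";
+ // any explicit `split_sizes` are forwarded to the layer's splitSizes field.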
+ void model_spec::add_split_nd(const std::string& name, const std::string& input,
+                               size_t axis, size_t num_splits,
+                               const std::vector<size_t>& split_sizes) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_input(input);
+
+   for (size_t i = 0; i < num_splits; i++) {
+     layer->add_output(name + "_" + std::to_string(i));
+   }
+
+   SplitNDLayerParams* params = layer->mutable_splitnd();
+   params->set_axis(axis);
+   params->set_numsplits(num_splits);
+   for (size_t s : split_sizes) {
+     params->add_splitsizes(s);
+   }
+ }
+
+ void model_spec::add_concat_nd(const std::string& name,
+                                const std::vector<std::string>& inputs,
+                                size_t axis) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   for (const std::string& input : inputs) {
+     layer->add_input(input);
+   }
+   layer->add_output(name);
+
+   ConcatNDLayerParams* params = layer->mutable_concatnd();
+   params->set_axis(axis);
+ }
+
+ void model_spec::add_reshape_static(const std::string& name,
+                                     const std::string& input,
+                                     const std::vector<size_t>& targetShape) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_input(input);
+   layer->add_output(name);
+
+   ReshapeStaticLayerParams* params = layer->mutable_reshapestatic();
+   for (size_t i = 0; i < targetShape.size(); i++) {
+     params->add_targetshape(targetShape[i]);
+   }
+ }
+
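+ // The first input is the tensor to reshape; the trailing input is expected to
+ // carry the target shape at runtime (per the CoreML reshapeDynamic layer).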
+ void model_spec::add_reshape_dynamic(const std::string& name,
+                                      const std::vector<std::string>& inputs) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   for (const std::string& input : inputs) {
+     layer->add_input(input);
+   }
+   layer->add_output(name);
+   layer->mutable_reshapedynamic();
+ }
+
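+ // `inputVector` and `outputVector` are the expected tensor shapes before and
+ // after expansion; they are recorded as rank/dimension metadata on the layer.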
+ void model_spec::add_expand_dims(const std::string& name,
+                                  const std::string& input,
+                                  const std::vector<size_t>& axes,
+                                  const std::vector<size_t>& inputVector,
+                                  const std::vector<size_t>& outputVector) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_input(input);
+   auto* inputTensor = layer->add_inputtensor();
+   inputTensor->set_rank(static_cast<unsigned>(inputVector.size()));
+   for (size_t i = 0; i < inputVector.size(); ++i) {
+     inputTensor->add_dimvalue(inputVector[i]);
+   }
+   layer->add_output(name);
+   auto* outputTensor = layer->add_outputtensor();
+   outputTensor->set_rank(static_cast<unsigned>(outputVector.size()));
+   for (size_t i = 0; i < outputVector.size(); ++i) {
+     outputTensor->add_dimvalue(outputVector[i]);
+   }
+   ExpandDimsLayerParams* params = layer->mutable_expanddims();
+   for (size_t i = 0; i < axes.size(); ++i) {
+     params->add_axes(axes[i]);
+   }
+ }
+
+ void model_spec::add_squeeze(const std::string& name, const std::string& input,
+                              const std::vector<size_t>& axes,
+                              const std::vector<size_t>& inputVector,
+                              const std::vector<size_t>& outputVector) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_input(input);
+   auto* inputTensor = layer->add_inputtensor();
+   inputTensor->set_rank(static_cast<unsigned>(inputVector.size()));
+   for (size_t i = 0; i < inputVector.size(); ++i) {
+     inputTensor->add_dimvalue(inputVector[i]);
+   }
+   layer->add_output(name);
+   auto* outputTensor = layer->add_outputtensor();
+   outputTensor->set_rank(static_cast<unsigned>(outputVector.size()));
+   for (size_t i = 0; i < outputVector.size(); ++i) {
+     outputTensor->add_dimvalue(outputVector[i]);
+   }
+
+   SqueezeLayerParams* params = layer->mutable_squeeze();
+   for (size_t i = 0; i < axes.size(); ++i) {
+     params->add_axes(axes[i]);
+   }
+ }
+
+ void model_spec::add_add_broadcastable(const std::string& name,
+                                        const std::vector<std::string>& inputs) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   for (const std::string& input : inputs) {
+     layer->add_input(input);
+   }
+   layer->add_output(name);
+   layer->mutable_addbroadcastable();
+ }
+
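+ // Per the CoreML gather layer, the first input is treated as the data tensor
+ // and the second as the indices to gather.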
+ void model_spec::add_gather(const std::string& name,
+                             const std::vector<std::string>& inputs) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   for (const std::string& input : inputs) {
+     layer->add_input(input);
+   }
+   layer->add_output(name);
+   layer->mutable_gather();
+ }
+
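+ // Emits a loadConstantND layer with no inputs; the weight buffer is sized to
+ // the product of the shape dimensions and filled via `data`.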
+ void model_spec::add_constant_nd(const std::string& name,
+                                  const std::vector<size_t>& shape,
+                                  const weight_initializer& data) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_output(name);
+   LoadConstantNDLayerParams* params = layer->mutable_loadconstantnd();
+   size_t size = 1;
+   for (size_t i = 0; i < shape.size(); ++i) {
+     params->add_shape(shape[i]);
+     size *= shape[i];
+   }
+   init_weight_params(params->mutable_data(), size, data);
+ }
+
+ void model_spec::add_get_shape(const std::string& name,
+                                const std::string& input) {
+   NeuralNetworkLayer* layer = impl_->add_layers();
+   layer->set_name(name);
+   layer->add_input(input);
+   layer->add_output(name);
+   layer->mutable_getshape();
+ }
+
pipeline_spec::pipeline_spec(std::unique_ptr<Pipeline> impl)
  : impl_(std::move(impl)) {}
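Note: a rough sketch of how the new N-D builders compose (hypothetical caller code with made-up blob names, not part of this diff; `spec` stands for a model_spec instance). It relies on the "<name>_<i>" output naming used by add_split_nd() above:

  // Split "lstm_out" into two equal halves along axis 1, then sum the halves.
  spec.add_split_nd("halves", "lstm_out", /* axis */ 1, /* num_splits */ 2,
                    /* split_sizes */ {});
  spec.add_add_broadcastable("halves_sum", {"halves_0", "halves_1"});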