This repository was archived by the owner on Dec 21, 2023. It is now read-only.

Commit a6c7ce4

Updates to model_spec (#3111)
Plus some useful utilities for style transfer experimentation.
1 parent f30f7ae · commit a6c7ce4

File tree

4 files changed: +360 −12 lines


src/ml/neural_net/model_spec.cpp

Lines changed: 197 additions & 10 deletions
@@ -9,6 +9,7 @@
 #include <algorithm>
 #include <fstream>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include <core/logging/assertions.hpp>
@@ -20,19 +21,31 @@ namespace neural_net {
 
 namespace {
 
+using CoreML::Specification::AddBroadcastableLayerParams;
 using CoreML::Specification::BatchnormLayerParams;
 using CoreML::Specification::BorderAmounts_EdgeSizes;
+using CoreML::Specification::ConcatNDLayerParams;
 using CoreML::Specification::ConvolutionLayerParams;
+using CoreML::Specification::ExpandDimsLayerParams;
+using CoreML::Specification::GatherLayerParams;
+using CoreML::Specification::GetShapeLayerParams;
 using CoreML::Specification::InnerProductLayerParams;
+using CoreML::Specification::LoadConstantNDLayerParams;
 using CoreML::Specification::Model;
 using CoreML::Specification::NeuralNetwork;
 using CoreML::Specification::NeuralNetworkImageScaler;
 using CoreML::Specification::NeuralNetworkLayer;
 using CoreML::Specification::NeuralNetworkPreprocessing;
 using CoreML::Specification::PaddingLayerParams;
+using CoreML::Specification::PaddingLayerParams_PaddingConstant;
 using CoreML::Specification::Pipeline;
 using CoreML::Specification::PoolingLayerParams;
+using CoreML::Specification::ReshapeDynamicLayerParams;
+using CoreML::Specification::ReshapeStaticLayerParams;
 using CoreML::Specification::SamePadding;
+using CoreML::Specification::SplitNDLayerParams;
+using CoreML::Specification::SqueezeLayerParams;
+using CoreML::Specification::TransposeLayerParams;
 using CoreML::Specification::UniDirectionalLSTMLayerParams;
 using CoreML::Specification::UpsampleLayerParams;
 using CoreML::Specification::WeightParams;
@@ -642,10 +655,10 @@ void model_spec::add_convolution(
   }
 }
 
-void model_spec::add_padding(
-    const std::string& name, const std::string& input,
-    size_t padding_top, size_t padding_bottom, size_t padding_left,
-    size_t padding_right) {
+void model_spec::add_padding(const std::string& name, const std::string& input,
+                             size_t padding_top, size_t padding_bottom,
+                             size_t padding_left, size_t padding_right,
+                             padding_policy policy) {
   NeuralNetworkLayer* layer = impl_->add_layers();
   layer->set_name(name);
   layer->add_input(input);
@@ -662,12 +675,19 @@ void model_spec::add_padding(
   left_right->set_startedgesize(padding_left);
   left_right->set_endedgesize(padding_right);
 
-  /**
-   * TODO: Currently we only handle reflective padding in our CoreMLmodels.
-   * If you need to support more type of padding in this particular layer
-   * please modify the code below to extent functionality.
-   */
-  params->mutable_reflection();
+  switch (policy) {
+    case padding_policy::REFLECTIVE:
+      params->mutable_reflection();
+      break;
+    case padding_policy::REPLICATION:
+      params->mutable_replication();
+      break;
+    case padding_policy::ZERO:
+      PaddingLayerParams_PaddingConstant* constant_padding =
+          params->mutable_constant();
+      constant_padding->set_value(0);
+      break;
+  }
 }
 
 void model_spec::add_upsampling(
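For context, a call site for the widened add_padding signature might look like the sketch below. Only add_padding and its REFLECTIVE/REPLICATION/ZERO policies come from this commit; the spec instance and blob names are illustrative, and the sketch assumes the enum is scoped as model_spec::padding_policy in model_spec.hpp.

// Hypothetical call sites for the new padding_policy argument. Blob names
// ("image", "pad_reflect", ...) are illustrative, not part of this commit.
model_spec spec;
spec.add_padding("pad_reflect", "image", /* top */ 1, /* bottom */ 1,
                 /* left */ 1, /* right */ 1,
                 model_spec::padding_policy::REFLECTIVE);
spec.add_padding("pad_zero", "pad_reflect", 2, 2, 2, 2,
                 model_spec::padding_policy::ZERO);

Per the switch added above, the ZERO case maps to a PaddingConstant with value 0, while the other two cases select the reflection and replication padding modes.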
@@ -977,6 +997,173 @@ void model_spec::add_preprocessing(const std::string& feature_name,
   image_scaler->set_channelscale(image_scale);
 }
 
+void model_spec::add_transpose(const std::string& name,
+                               const std::string& input,
+                               std::vector<size_t> axes) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_input(input);
+  layer->add_output(name);
+  TransposeLayerParams* params = layer->mutable_transpose();
+  for (size_t a : axes) {
+    params->add_axes(a);
+  }
+}
+
+void model_spec::add_split_nd(const std::string& name, const std::string& input,
+                              size_t axis, size_t num_splits,
+                              const std::vector<size_t>& split_sizes) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_input(input);
+
+  for (size_t i = 0; i < num_splits; i++) {
+    layer->add_output(name + "_" + std::to_string(i));
+  }
+
+  SplitNDLayerParams* params = layer->mutable_splitnd();
+  params->set_axis(axis);
+  params->set_numsplits(num_splits);
+  for (size_t s : split_sizes) {
+    params->add_splitsizes(s);
+  }
+}
+
+void model_spec::add_concat_nd(const std::string& name,
+                               const std::vector<std::string>& inputs,
+                               size_t axis) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  for (const std::string& input : inputs) {
+    layer->add_input(input);
+  }
+  layer->add_output(name);
+
+  ConcatNDLayerParams* params = layer->mutable_concatnd();
+  params->set_axis(axis);
+}
+
+void model_spec::add_reshape_static(const std::string& name,
+                                    const std::string& input,
+                                    const std::vector<size_t>& targetShape) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_input(input);
+  layer->add_output(name);
+
+  ReshapeStaticLayerParams* params = layer->mutable_reshapestatic();
+  for (size_t i = 0; i < targetShape.size(); i++) {
+    params->add_targetshape(targetShape[i]);
+  }
+}
+
+void model_spec::add_reshape_dynamic(const std::string& name,
+                                     const std::vector<std::string>& inputs) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  for (const std::string& input : inputs) {
+    layer->add_input(input);
+  }
+  layer->add_output(name);
+  layer->mutable_reshapedynamic();
+}
+
+void model_spec::add_expand_dims(const std::string& name,
+                                 const std::string& input,
+                                 const std::vector<size_t>& axes,
+                                 const std::vector<size_t>& inputVector,
+                                 const std::vector<size_t>& outputVector) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_input(input);
+  auto* inputTensor = layer->add_inputtensor();
+  inputTensor->set_rank(static_cast<unsigned>(inputVector.size()));
+  for (size_t i = 0; i < inputVector.size(); ++i) {
+    inputTensor->add_dimvalue(inputVector[i]);
+  }
+  layer->add_output(name);
+  auto* outputTensor = layer->add_outputtensor();
+  outputTensor->set_rank(static_cast<unsigned>(outputVector.size()));
+  for (size_t i = 0; i < outputVector.size(); ++i) {
+    outputTensor->add_dimvalue(outputVector[i]);
+  }
+  ExpandDimsLayerParams* params = layer->mutable_expanddims();
+  for (size_t i = 0; i < axes.size(); ++i) {
+    params->add_axes(axes[i]);
+  }
+}
+
+void model_spec::add_squeeze(const std::string& name, const std::string& input,
+                             const std::vector<size_t>& axes,
+                             const std::vector<size_t>& inputVector,
+                             const std::vector<size_t>& outputVector) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_input(input);
+  auto* inputTensor = layer->add_inputtensor();
+  inputTensor->set_rank(static_cast<unsigned>(inputVector.size()));
+  for (size_t i = 0; i < inputVector.size(); ++i) {
+    inputTensor->add_dimvalue(inputVector[i]);
+  }
+  layer->add_output(name);
+  auto* outputTensor = layer->add_outputtensor();
+  outputTensor->set_rank(static_cast<unsigned>(outputVector.size()));
+  for (size_t i = 0; i < outputVector.size(); ++i) {
+    outputTensor->add_dimvalue(outputVector[i]);
+  }
+
+  SqueezeLayerParams* params = layer->mutable_squeeze();
+  for (size_t i = 0; i < axes.size(); ++i) {
+    params->add_axes(axes[i]);
+  }
+}
+
+void model_spec::add_add_broadcastable(const std::string& name,
+                                       const std::vector<std::string>& inputs) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  for (const std::string& input : inputs) {
+    layer->add_input(input);
+  }
+  layer->add_output(name);
+  layer->mutable_addbroadcastable();
+}
+
+void model_spec::add_gather(const std::string& name,
+                            const std::vector<std::string>& inputs) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  for (const std::string& input : inputs) {
+    layer->add_input(input);
+  }
+  layer->add_output(name);
+  layer->mutable_gather();
+}
+
+void model_spec::add_constant_nd(const std::string& name,
+                                 const std::vector<size_t>& shape,
+                                 const weight_initializer& data) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_output(name);
+  LoadConstantNDLayerParams* params = layer->mutable_loadconstantnd();
+  size_t size = 1;
+  for (size_t i = 0; i < shape.size(); ++i) {
+    params->add_shape(shape[i]);
+    size *= shape[i];
+  }
+  init_weight_params(params->mutable_data(), size, data);
+}
+
+void model_spec::add_get_shape(const std::string& name,
+                               const std::string& input) {
+  NeuralNetworkLayer* layer = impl_->add_layers();
+  layer->set_name(name);
+  layer->add_input(input);
+  layer->add_output(name);
+  layer->mutable_getshape();
+}
+
 pipeline_spec::pipeline_spec(std::unique_ptr<Pipeline> impl)
     : impl_(std::move(impl)) {}
 
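Taken together, the new helpers expose CoreML's rank-N tensor layers (transpose, split/concat, static and dynamic reshape, expand dims/squeeze, gather, broadcastable add, constant, get shape) through model_spec. A minimal sketch of how a few of them might be chained is shown below; the blob names, the 64-channel shape, and the assumption that "gamma_table" and "style_index" are produced elsewhere (e.g. via add_constant_nd or as model inputs) are illustrative, not part of this commit.

// Minimal sketch of composing the new layer builders (assumed blob names
// and shapes; only the member functions come from this commit).
model_spec spec;
spec.add_get_shape("image_shape", "image");                      // input shape as a blob
spec.add_gather("style_gamma", {"gamma_table", "style_index"});  // pick one style's row
spec.add_reshape_static("gamma_4d", "style_gamma", {1, 64, 1, 1});
spec.add_add_broadcastable("shifted", {"image", "gamma_4d"});    // broadcast add per channel
spec.add_transpose("shifted_nhwc", "shifted", {0, 2, 3, 1});     // NCHW -> NHWC

Each call appends one NeuralNetworkLayer to the spec, named after its output blob, which is the convention the existing model_spec builders already follow.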