diff --git a/src/server/codegen/knowledgepack_codegen_mixin.py b/src/server/codegen/knowledgepack_codegen_mixin.py
index 2043644e..827e10f0 100755
--- a/src/server/codegen/knowledgepack_codegen_mixin.py
+++ b/src/server/codegen/knowledgepack_codegen_mixin.py
@@ -206,20 +206,19 @@ def create_tensorflow_build_flags(self, models):
         for model in models:
             if self.is_tensorflow(model["classifier_config"]["classifier"]):
                 if self.nn_inference_engine == "nnom":
-                    #TODO: NNoM
+                    # TODO: NNoM
                     pass
 
                 if self.nn_inference_engine == "tf_micro":
                     return tf_micro_cflags
 
-
         return ""
 
     def create_kb_model_tf_micro_binary(self, models):
         for model in models:
             if self.is_tensorflow(model["classifier_config"]["classifier"]):
                 if self.nn_inference_engine == "nnom":
-                    #TODO: NNoM
+                    # TODO: NNoM
                     pass
 
                 if self.nn_inference_engine == "tf_micro":
@@ -233,6 +232,9 @@ def create_sml_classification_result_info(self, models_data):
         model_fill["TensorFlow Lite for Microcontrollers"] = (
             "{\n\ttf_micro_model_results_object(kb_models[model_index].classifier_id, (model_results_t *)model_results);\n}"
         )
+        model_fill["Neural Network"] = (
+            "{\n\ttf_micro_model_results_object(kb_models[model_index].classifier_id, (model_results_t *)model_results);\n}"
+        )
 
         model_fill["Decision Tree Ensemble"] = (
             "{\n\ttree_ensemble_model_results_object(kb_models[model_index].classifier_id, (model_results_t *)model_results);\n}"
@@ -256,6 +258,20 @@ def create_sml_classification_result_print_info(self, models_data):
     """
+        model_fill[
+            "Neural Network"
+        ] = """{
+    sml_classification_result_info(model_index, &model_result);\n
+    pbuf += sprintf(pbuf, ",\\"ModelDebug\\":[");
+    for (int32_t i=0; iclassifier_id, kb_model->pfeature_vector, kb_model->pmodel_results);", )
-        if self.nn_inference_engine=='nnom':
-            output_str += c_line(
+        if self.nn_inference_engine == "nnom":
+            output_str += c_line(
                 1,
                 "ret = nnom_simple_submit(kb_model->classifier_id, kb_model->pfeature_vector, kb_model->pmodel_results);",
             )
diff --git a/src/server/codegen/knowledgepack_model_graph_mixin.py b/src/server/codegen/knowledgepack_model_graph_mixin.py
index 43d065f4..faab6e37 100644
--- a/src/server/codegen/knowledgepack_model_graph_mixin.py
+++ b/src/server/codegen/knowledgepack_model_graph_mixin.py
@@ -614,7 +614,7 @@ def get_classifier_init(self, knowledgepack):
         elif classifier_config["classifier"] in [
             "TF Micro",
             "TensorFlow Lite for Microcontrollers",
-            "Neural Network"
+            "Neural Network",
         ]:
             return {"classifier": "Neural Network"}
 
@@ -1618,5 +1618,7 @@ def get_classifier_type(knowledgepack):
 
     if classifier_type == "tensorflow_lite_for_microcontrollers":
         classifier_type = "tf_micro"
+    if classifier_type == "neural_network":
+        classifier_type = "tf_micro"
 
     return classifier_type
diff --git a/src/server/codegen/model_gen/model_gen.py b/src/server/codegen/model_gen/model_gen.py
index e4f5d124..11e95ca2 100644
--- a/src/server/codegen/model_gen/model_gen.py
+++ b/src/server/codegen/model_gen/model_gen.py
@@ -24,7 +24,7 @@
     linear_regression,
     pme,
     tf_micro,
-    nnom
+    nnom,
 )
 
 from django.core.exceptions import ValidationError
@@ -35,12 +35,13 @@
     "bonsai",
     "pme",
     "linear_regression",
-    "nnom"
+    "nnom",
 ]
 
 CLASSIFER_MAP = {
     "decision tree ensemble": "decision_tree_ensemble",
     "tensorflow lite for microcontrollers": "tf_micro",
+    "Neural Network": "tf_micro",
     "nnom": "nnom",
     "pme": "pme",
     "boosted tree ensemble": "boosted_tree_ensemble",
@@ -61,7 +62,7 @@ def get_classifier_type(model_configuration):
     return classifier_type.lower()
 
 
-#TODO: Make this an interface that returns the object instead of having all of these if statements
+# TODO: Make this an interface that returns the object instead of having all of these if statements
 class ModelGen:
     @staticmethod
     def create_classifier_structures(classifier_type, kb_models):
@@ -82,10 +83,10 @@ def create_classifier_structures(classifier_type, kb_models):
 
         if classifier_type == "linear_regression":
             return linear_regression.create_classifier_structures(kb_models)
-        
+
         if classifier_type == "nnom":
             return nnom.create_classifier_structures(kb_models)
-        
+
         return ""
 
     @staticmethod
@@ -107,7 +108,7 @@ def create_max_tmp_parameters(classifier_type, kb_models):
 
         if classifier_type == "linear_regression":
             return linear_regression.create_max_tmp_parameters(kb_models)
-        
+
         if classifier_type == "nnom":
             return nnom.create_max_tmp_parameters(kb_models)
 
@@ -151,7 +152,7 @@ def validate_model_parameters(model_parameters, model_configuration):
 
         if classifier_type == "linear_regression":
            return linear_regression.validate_model_parameters(model_parameters)
-        
+
         if classifier_type == "nnom":
             return nnom.validate_model_parameters(model_parameters)
 
@@ -180,8 +181,7 @@ def validate_model_configuration(model_configuration):
 
         if classifier_type == "linear_regression":
             return linear_regression.validate_model_configuration(model_configuration)
-        
-
+
         if classifier_type == "nnom":
             return nnom.validate_model_configuration(model_configuration)
 
@@ -204,7 +204,7 @@ def get_output_tensor_size(classifier_type, model):
 
         if classifier_type == "linear_regression":
             return linear_regression.get_output_tensor_size(model)
-        
+
         if classifier_type == "nnom":
             return nnom.get_output_tensor_size(model)
 
@@ -232,7 +232,7 @@ def get_input_feature_type(model):
 
         if classifier_type == "linear_regression":
             return FLOAT
-        
+
         if classifier_type == "nnom":
             return UINT8_T
 
@@ -263,7 +263,7 @@ def get_input_feature_def(model):
 
         if classifier_type == "nnom":
             return UINT8_T
-        
+
         raise ValueError("No classifier type found")
 
     @staticmethod
@@ -273,7 +273,7 @@ def get_model_type(model):
         CLASSIFICATION = 1
         if classifier_type == "tf_micro":
             return CLASSIFICATION
-        
+
         if classifier_type == "nnom":
             return CLASSIFICATION
 
diff --git a/src/server/codegen/model_gen/nnom.py b/src/server/codegen/model_gen/nnom.py
index b2eca32b..2c8aee8e 100644
--- a/src/server/codegen/model_gen/nnom.py
+++ b/src/server/codegen/model_gen/nnom.py
@@ -23,7 +23,7 @@
 
 
 def create_classifier_arrays(model_index, model):
-    
+
     pass
 
 
@@ -32,16 +32,16 @@ def create_classifier_struct(model_index, model):
 
 
 def create_classifier_structures(models):
-    """ typedef struct nnom_classifier_rows
-        {
-            uint16_t num_inputs;
-            uint8_t num_outputs;
-            float threshold;
-            uint8_t estimator_type;
-            nnom_model_t* model;
-        } nnom_classifier_rows_t;
+    """typedef struct nnom_classifier_rows
+    {
+        uint16_t num_inputs;
+        uint8_t num_outputs;
+        float threshold;
+        uint8_t estimator_type;
+        nnom_model_t* model;
+    } nnom_classifier_rows_t;
     """
-    
+
     outputs = []
     iterations = 0
@@ -49,11 +49,11 @@ def create_classifier_structures(models):
         if model["classifier_config"].get("classifier", "PME") in [
             "TF Micro",
             "TensorFlow Lite for Microcontrollers",
-            "Neural Network"
+            "Neural Network",
         ]:
-            #outputs.extend(
+            # outputs.extend(
             #    create_tf_micro_classifier_arrays(iterations, model["model_arrays"])
-            #)
+            # )
             iterations += 1
 
     iterations = 0
@@ -61,13 +61,11 @@ def create_classifier_structures(models):
         outputs.append(
             (
                 "nnom_classifier_rows_t nnom_classifier_rows[{}] = ".format(
-                    utils.get_number_classifiers(
-                        models, "TF Micro"
-                    )+utils.get_number_classifiers(
+                    utils.get_number_classifiers(models, "TF Micro")
+                    + utils.get_number_classifiers(
                         models, "TensorFlow Lite for Microcontrollers"
-                    )+utils.get_number_classifiers(
-                        models, "Neural Network"
                     )
+                    + utils.get_number_classifiers(models, "Neural Network")
                 )
                 + "{"
             )
@@ -77,7 +75,7 @@ def create_classifier_structures(models):
         if model["classifier_config"].get("classifier", "PME") in [
             "TF Micro",
             "TensorFlow Lite for Microcontrollers",
-            "Neural Network"
+            "Neural Network",
         ]:
             outputs.append("\n\t{")
             outputs.append(
@@ -111,13 +109,14 @@ def create_classifier_structures(models):
 
     return outputs
 
-
 def create_max_tmp_parameters(kb_models):
-    return []
+    return []
+
 
 def validate_model_parameters(data):
     pass
 
+
 def validate_model_configuration(data):
     return data
diff --git a/src/server/codegen/model_gen/tf_micro.py b/src/server/codegen/model_gen/tf_micro.py
index a55b9614..58611a45 100644
--- a/src/server/codegen/model_gen/tf_micro.py
+++ b/src/server/codegen/model_gen/tf_micro.py
@@ -88,7 +88,7 @@ def create_classifier_structures(models):
         if model["classifier_config"].get("classifier", "PME") in [
             "TF Micro",
             "TensorFlow Lite for Microcontrollers",
-            "Neural Network"
+            "Neural Network",
         ]:
             outputs.extend(
                 create_tf_micro_classifier_arrays(iterations, model["model_arrays"])
@@ -112,6 +112,7 @@ def create_classifier_structures(models):
         if model["classifier_config"].get("classifier", "PME") in [
             "TF Micro",
             "TensorFlow Lite for Microcontrollers",
+            "Neural Network",
         ]:
             outputs.append("\n\t{")
             outputs.append(
diff --git a/src/server/engine/recognitionengine.py b/src/server/engine/recognitionengine.py
index 2876a54a..cec187a8 100644
--- a/src/server/engine/recognitionengine.py
+++ b/src/server/engine/recognitionengine.py
@@ -136,6 +136,7 @@ def initialize_classifier(self, config):
         elif self.classifier_type in [
             "TF Micro",
             "TensorFlow Lite for Microcontrollers",
+            "Neural Network",
         ]:
             self.classifier = TensorFlowMicro()
             self.classifier.load_model(self.neuron_array)
@@ -435,7 +436,7 @@ def reco_kb_pipeline(self, kb_description):
             "target_compiler": CompilerDescription.objects.get(
                 uuid="62aabe7e-4f5d-4167-a786-072e4a8dc158"
             ),
-            "nn_inference_engine":'nnom'
+            "nn_inference_engine": "nnom",
         }
 
         logger.userlog(
diff --git a/src/server/library/classifiers/classifiers.py b/src/server/library/classifiers/classifiers.py
index f8d20aba..d1a7a4b6 100644
--- a/src/server/library/classifiers/classifiers.py
+++ b/src/server/library/classifiers/classifiers.py
@@ -39,6 +39,7 @@ def get_classifier(config, save_model_parameters=True):
         "Boosted Tree Ensemble": BoostedTreeEnsemble,
         "TF Micro": TensorFlowMicro,
         "TensorFlow Lite for Microcontrollers": TensorFlowMicro,
+        "Neural Network": TensorFlowMicro,
         "Linear Regression": LinearRegression,
     }
     try:
diff --git a/src/server/library/core_functions/mg_contracts.py b/src/server/library/core_functions/mg_contracts.py
index 56d516a6..fd93d579 100644
--- a/src/server/library/core_functions/mg_contracts.py
+++ b/src/server/library/core_functions/mg_contracts.py
@@ -1274,6 +1274,23 @@ def tensorflow_micro():
 tensorflow_micro_contracts = {"input_contract": [], "output_contract": []}
 
 
+def neural_network():
+    """
+    The Neural Network classifier uses TensorFlow Lite for Microcontrollers, an inference engine
+    from Google optimized to run machine learning models on embedded devices.
+
+    TensorFlow Lite for Microcontrollers supports a subset of all TensorFlow functions. For a full
+    list see `all_ops_resolver.cc `_.
+
+    For additional documentation on TensorFlow Lite for Microcontrollers see `here `_.
+    """
+
+    return None
+
+
+neural_network_contracts = {"input_contract": [], "output_contract": []}
+
+
 def load_model_tensorflow_micro(
     input_data,
     label_column,
@@ -1433,7 +1450,9 @@ def load_model_tensorflow_micro(
             "type": "list",
             "element_type": "str",
             "handle_by_set": True,
-            "options": [{"name": "TensorFlow Lite for Microcontrollers"}],
+            "options": [
+                {"name": "TensorFlow Lite for Microcontrollers"}, {"name": "Neural Network"}
+            ],
         },
         {"name": "class_map", "type": "dict", "handle_by_set": True, "default": None},
         {
diff --git a/src/server/library/fixtures/functions_prod.yml b/src/server/library/fixtures/functions_prod.yml
index 5f5bfb04..6bbc3900 100644
--- a/src/server/library/fixtures/functions_prod.yml
+++ b/src/server/library/fixtures/functions_prod.yml
@@ -831,6 +831,23 @@
     automl_available: False
     model: library.transform
 
+- fields:
+    core: True
+    name: Neural Network
+    version: 1
+    type: Classifier
+    subtype: NN
+    path: core_functions/mg_contracts.py
+    function_in_file: neural_network
+    has_c_version: False
+    c_file_name:
+    c_function_name:
+    deprecated: False
+    dcl_executable: False
+    uuid: 3994d48f-7b8a-4807-b919-284bbe045928
+    automl_available: False
+    model: library.transform
+
 - fields:
     core: True
     name: Load Model TensorFlow Lite for Microcontrollers
@@ -848,6 +865,22 @@
     automl_available: False
     model: library.transform
 
+- fields:
+    core: True
+    name: Load Model Neural Network
+    version: 1
+    type: Training Algorithm
+    subtype: Load
+    path: core_functions/mg_contracts.py
+    function_in_file: load_model_tensorflow_micro
+    has_c_version: False
+    c_file_name:
+    c_function_name:
+    deprecated: False
+    dcl_executable: False
+    automl_available: False
+    model: library.transform
+
 - fields:
     core: True
     name: Bonsai Tree Optimizer
diff --git a/src/server/library/fixtures/parameter_inventory.yml b/src/server/library/fixtures/parameter_inventory.yml
index 435b3c5d..01967cfe 100644
--- a/src/server/library/fixtures/parameter_inventory.yml
+++ b/src/server/library/fixtures/parameter_inventory.yml
@@ -322,3 +322,16 @@
     binary_classifiers: 0
     allow_unknown: 1
     model: library.parameterinventory
+
+- fields:
+    uuid: e040c660-45c2-4e48-92fc-2a22310076c9
+    function: 3994d48f-7b8a-4807-b919-284bbe045928
+    function_name: Neural Network
+    pipeline_key: classifiers
+    variable_name: null
+    variable_type: null
+    variable_values: null
+    classifiers_optimizers_group: 4
+    binary_classifiers: 0
+    allow_unknown: 1
+    model: library.parameterinventory
diff --git a/src/server/library/fixtures/pipeline_schema.json b/src/server/library/fixtures/pipeline_schema.json
index 7cbab420..5f59f9bc 100755
--- a/src/server/library/fixtures/pipeline_schema.json
+++ b/src/server/library/fixtures/pipeline_schema.json
@@ -312,7 +312,8 @@
       "TransformList": null,
       "Set": false,
       "Exclude": [
-        "TF Micro"
+        "TF Micro",
+        "TensorFlow Lite for Microcontrollers"
       ],
       "Limit": 1
     },
diff --git a/src/ui/src/components/ModelSummary/ModelSummary.jsx b/src/ui/src/components/ModelSummary/ModelSummary.jsx
index 58a58248..4b562399 100644
--- a/src/ui/src/components/ModelSummary/ModelSummary.jsx
+++ b/src/ui/src/components/ModelSummary/ModelSummary.jsx
@@ -22,6 +22,7 @@
 import React, { useState, useEffect } from "react";
 import { Box, Typography } from "@mui/material";
 import StandardTable from "components/StandardTable";
 import { ColumnType } from "components/StandardTable/StandardTableConstants";
+import { NN_CLASSIFIER_NAME_LIST } from "consts";
 import NeuralNetworkSummary from "./NeuralNetworkSummary";
 import PMEModelSummary from "./PMEModelSummary";
@@ -72,7 +73,7 @@ const ModelSummary = ({ model, showTitle }) => {
         m.pipeline_summary[pipelineSummaryCount - 1].optimizers[0].inputs.max_depth;
     } else if (m.device_configuration?.classifier === "Boosted Tree Ensemble") {
       //
-    } else if (m.device_configuration?.classifier === "TensorFlow Lite for Microcontrollers") {
+    } else if (NN_CLASSIFIER_NAME_LIST.includes(m.device_configuration?.classifier)) {
       //
     }
   }
@@ -131,7 +132,7 @@ const ModelSummary = ({ model, showTitle }) => {
         {model?.data?.device_configuration?.classifier === "PME" && modelParameters ? (
         ) : null}
-        {model?.data?.device_configuration?.classifier === "TensorFlow Lite for Microcontrollers" ? (
+        {NN_CLASSIFIER_NAME_LIST.includes(model?.data?.device_configuration?.classifier) ? (
         ) : null}
diff --git a/src/ui/src/components/PipelineCreateForm/PipelineCreateForm.jsx b/src/ui/src/components/PipelineCreateForm/PipelineCreateForm.jsx
index 9e670b25..c75178e5 100644
--- a/src/ui/src/components/PipelineCreateForm/PipelineCreateForm.jsx
+++ b/src/ui/src/components/PipelineCreateForm/PipelineCreateForm.jsx
@@ -20,6 +20,7 @@ License along with SensiML Piccolo AI. If not, see
         {activeStep === STEP_PARAMS ? (
-
+
         ) : (
 . */
-import { DISABLED_CLASSIFIERS } from "store/autoML/const";
+import { DISABLED_CLASSIFIERS } from "consts";
 
 export const selectTransformsByTypeSubType = (type, subtype, transformList, excludeTransform) => (state) => {