|
23 | 23 | ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
|
24 | 24 | POSSIBILITY OF SUCH DAMAGE.
|
25 | 25 | """
|
26 |
| -import json |
27 | 26 | import numpy
|
28 |
| -from azureml.core.model import Model |
29 | 27 | import joblib
|
| 28 | +import os |
| 29 | +from inference_schema.schema_decorators \ |
| 30 | + import input_schema, output_schema |
| 31 | +from inference_schema.parameter_types.numpy_parameter_type \ |
| 32 | + import NumpyParameterType |
30 | 33 |
|
31 | 34 |
|
def init():
    """Load the serialized scikit-learn model into a global for run().

    Called once when the scoring service container starts.  AZUREML_MODEL_DIR
    is an environment variable created during service deployment; it contains
    the path to the folder holding the registered model files.

    Raises:
        KeyError: if AZUREML_MODEL_DIR is not set in the environment.
        ValueError: if no ``.pkl`` file is found under AZUREML_MODEL_DIR.
    """
    global model

    path = os.environ['AZUREML_MODEL_DIR']
    model_path = None
    for root, dirs, files in os.walk(path):
        for file in files:
            # endswith avoids false matches on names that merely contain
            # ".pkl" (e.g. "model.pkl.bak").
            if file.endswith('.pkl'):
                # BUG FIX: join against the directory currently being walked
                # (root), not the top-level path — otherwise a model stored
                # in a subfolder resolves to a nonexistent path.
                model_path = os.path.join(root, file)
    if model_path is None:
        raise ValueError(".pkl model not found")
    model = joblib.load(model_path)
|
39 | 50 |
|
40 | 51 |
|
41 |
| -def run(raw_data, request_headers): |
42 |
| - data = json.loads(raw_data)["data"] |
43 |
| - data = numpy.array(data) |
# Example request/response payloads.  inference-schema uses these to derive
# the service's OpenAPI (Swagger) schema, so their shapes and dtypes must
# match what the model actually consumes and produces.
input_sample = numpy.array([
    list(range(1, 11)),
    list(range(10, 0, -1)),
])
output_sample = numpy.array([5021.509689995557, 3693.645386402646])
| 58 | + |
| 59 | + |
| 60 | +# Inference_schema generates a schema for your web service |
| 61 | +# It then creates an OpenAPI (Swagger) specification for the web service |
| 62 | +# at http://<scoring_base_url>/swagger.json |
| 63 | +@input_schema('data', NumpyParameterType(input_sample)) |
| 64 | +@output_schema(NumpyParameterType(output_sample)) |
| 65 | +def run(data, request_headers): |
44 | 66 | result = model.predict(data)
|
45 | 67 |
|
46 | 68 | # Demonstrate how we can log custom data into the Application Insights
|
|
0 commit comments