@@ -565,35 +565,7 @@ Use the following steps to deploy an MLflow model with a custom scoring script.
565
565
566
566
__score.py__
567
567
568
- ```python
569
- import logging
570
- import mlflow
571
- import os
572
- from io import StringIO
573
- from mlflow.pyfunc.scoring_server import infer_and_parse_json_input, predictions_to_json
574
-
575
- def init():
576
- global model
577
- global input_schema
578
- # The path 'model' corresponds to the path where the MLflow artifacts were stored when
579
- # registering the model using MLflow format.
580
- model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model')
581
- model = mlflow.pyfunc.load_model(model_path)
582
- input_schema = model.metadata.get_input_schema()
583
-
584
- def run(raw_data):
585
- json_data = json.loads(raw_data)
586
- if "input_data" not in json_data.keys():
587
- raise Exception("Request must contain a top level key named 'input_data'")
588
-
589
- serving_input = json.dumps(json_data["input_data"])
590
- data = infer_and_parse_json_input(serving_input, input_schema)
591
- result = model.predict(data)
592
-
593
- result = StringIO()
594
- predictions_to_json(raw_predictions, result)
595
- return result.getvalue()
596
- ```
568
+ :::code language="python" source="~/azureml-examples-main/cli/endpoints/online/ncd/sklearn-diabetes/src/score.py":::
597
569
598
570
> [!TIP]
599
571
> The previous scoring script is provided as an example about how to perform inference of an MLflow model. You can adapt this example to your needs or change any of its parts to reflect your scenario.
@@ -607,21 +579,7 @@ Use the following steps to deploy an MLflow model with a custom scoring script.
607
579
608
580
__conda.yml__
609
581
610
- ```yaml
611
- channels:
612
- - conda-forge
613
- dependencies:
614
- - python=3.7.11
615
- - pip
616
- - pip:
617
- - mlflow
618
- - scikit-learn==0.24.1
619
- - cloudpickle==2.0.0
620
- - psutil==5.8.0
621
- - pandas==1.3.5
622
- - azureml-inference-server-http
623
- name: mlflow-env
624
- ```
582
+ :::code language="yaml" source="~/azureml-examples-main/cli/endpoints/online/ncd/sklearn-diabetes/environment/conda.yml":::
625
583
626
584
> [!NOTE]
627
585
> Note how the package `azureml-inference-server-http` has been added to the original conda dependencies file.
@@ -666,20 +624,7 @@ Use the following steps to deploy an MLflow model with a custom scoring script.
666
624
667
625
Create a deployment configuration file:
668
626
669
- ```yaml
670
- $schema: https://azuremlschemas.azureedge.net/latest/managedOnlineDeployment.schema.json
671
- name: sklearn-diabetes-custom
672
- endpoint_name: my-endpoint
673
- model: azureml:sklearn-diabetes@latest
674
- environment:
675
- image: mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04
676
- conda_file: mlflow/sklearn-diabetes/environment/conda.yml
677
- code_configuration:
678
- code: mlflow/sklearn-diabetes/src
679
- scoring_script: score.py
680
- instance_type: Standard_F2s_v2
681
- instance_count: 1
682
- ```
627
+ :::code language="yaml" source="~/azureml-examples-main/cli/endpoints/online/ncd/sklearn-deployment-with-script.yaml":::
683
628
684
629
Create the deployment:
685
630
0 commit comments