Skip to content

Commit 4974943

Browse files
committed
Task 399966: Q&M: Retire path and modules: NVIDIA + ML (2 of 2)
1 parent d77c0ee commit 4974943

File tree

62 files changed

+80
-771
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

62 files changed

+80
-771
lines changed

.openpublishing.redirection.json

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44252,6 +44252,76 @@
4425244252
"source_path": "learn-pr/language/rust-set-up-environment/index.yml",
4425344253
"redirect_url": "https://learn.microsoft.com/training/browse/",
4425444254
"redirect_document_id": false
44255+
},
44256+
{
44257+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/1-introduction.yml",
44258+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44259+
"redirect_document_id": false
44260+
},
44261+
{
44262+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/2-create-gpu-accelerated-virtual-machine.yml",
44263+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44264+
"redirect_document_id": false
44265+
},
44266+
{
44267+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/3-install-prerequisites-nvidia-triton-inference-server.yml",
44268+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44269+
"redirect_document_id": false
44270+
},
44271+
{
44272+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/4-execute-inference-workload-nvidia-triton-inference-server.yml",
44273+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44274+
"redirect_document_id": false
44275+
},
44276+
{
44277+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/5-knowledge-check.yml",
44278+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44279+
"redirect_document_id": false
44280+
},
44281+
{
44282+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/6-summary.yml",
44283+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44284+
"redirect_document_id": false
44285+
},
44286+
{
44287+
"source_path": "learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/index.yml",
44288+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44289+
"redirect_document_id": false
44290+
},
44291+
{
44292+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/1-introduction.yml",
44293+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44294+
"redirect_document_id": false
44295+
},
44296+
{
44297+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/2-prepare-jupyter-notebook-workspace.yml",
44298+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44299+
"redirect_document_id": false
44300+
},
44301+
{
44302+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/3-configure-jupyter-notebook-execution-environment.yml",
44303+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44304+
"redirect_document_id": false
44305+
},
44306+
{
44307+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/4-execute-jupyter-notebook-produce-object-detection-model-automl.yml",
44308+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44309+
"redirect_document_id": false
44310+
},
44311+
{
44312+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/5-knowledge-check.yml",
44313+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44314+
"redirect_document_id": false
44315+
},
44316+
{
44317+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/6-summary.yml",
44318+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44319+
"redirect_document_id": false
44320+
},
44321+
{
44322+
"source_path": "learn-pr/nvidia/use-automl-train-labeled-dataset-develop-production-model/index.yml",
44323+
"redirect_url": "https://learn.microsoft.com/training/browse/",
44324+
"redirect_document_id": false
4425544325
}
4425644326
]
4425744327
}

learn-pr/achievements.yml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5325,3 +5325,13 @@ achievements:
53255325
title: Write automated tests
53265326
summary: Learn about the types of testing you can do with Rust.
53275327
iconUrl: /training/achievements/rust-automated-tests.svg
5328+
- uid: learn.nvidia.use-automl-train-labeled-dataset-develop-production-model.badge
5329+
type: badge
5330+
title: Use AutoML to train a labeled dataset and develop a production model
5331+
summary: Learn how to use Automated Machine Learning to train a labeled dataset and develop a production object detection model.
5332+
iconUrl: /training/achievements/setup-configure-nvidia-deepstream-development.svg
5333+
- uid: learn.nvidia.deploy-model-to-nvidia-triton-inference-server.badge
5334+
type: badge
5335+
title: Deploy model to NVIDIA Triton Inference Server
5336+
summary: NVIDIA Triton Inference Server is a multi-framework, open-source software that is optimized for inference. It supports popular machine learning frameworks like TensorFlow, Open Neural Network Exchange (ONNX) Runtime, PyTorch, NVIDIA TensorRT, and more. It can be used for your CPU or GPU workloads. In this module, you deploy your production model to NVIDIA Triton server to perform inference on a cloud-hosted virtual machine.
5337+
iconUrl: /training/achievements/introduction-nvidia-deepstream-graph-composer-azure.svg

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/1-introduction.yml

Lines changed: 0 additions & 17 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/2-create-gpu-accelerated-virtual-machine.yml

Lines changed: 0 additions & 17 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/3-install-prerequisites-nvidia-triton-inference-server.yml

Lines changed: 0 additions & 17 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/4-execute-inference-workload-nvidia-triton-inference-server.yml

Lines changed: 0 additions & 17 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/5-knowledge-check.yml

Lines changed: 0 additions & 52 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/6-summary.yml

Lines changed: 0 additions & 17 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/includes/1-introduction.md

Lines changed: 0 additions & 21 deletions
This file was deleted.

learn-pr/nvidia/deploy-model-to-nvidia-triton-inference-server/includes/2-create-gpu-accelerated-virtual-machine.md

Lines changed: 0 additions & 59 deletions
This file was deleted.

0 commit comments

Comments
 (0)